diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index f4d17b3596b..bb4e2d24c99 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,4 +1,4 @@ -blank_issues_enabled: false +blank_issues_enabled: true contact_links: - name: Prometheus Community Support url: https://prometheus.io/community/ diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index 3f6cf76e16f..bf7f681b696 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -12,8 +12,8 @@ jobs: name: lint runs-on: ubuntu-latest steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1 diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index 632d38cb009..669305ebd33 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -12,8 +12,8 @@ jobs: runs-on: ubuntu-latest if: github.repository_owner == 'prometheus' steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 98d3d9a754f..2714211dd70 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,10 +13,12 @@ jobs: # should also be updated. image: quay.io/prometheus/golang-builder:1.23-base steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - uses: ./.github/promci/actions/setup_environment - - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1 + with: + enable_npm: true + - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1 test-flags="" - run: go test --tags=stringlabels ./tsdb/ -test.tsdb-isolation=false - run: make -C documentation/examples/remote_storage - run: make -C documentation/examples @@ -27,8 +29,8 @@ jobs: container: image: quay.io/prometheus/golang-builder:1.23-base steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - uses: ./.github/promci/actions/setup_environment - run: go test --tags=dedupelabels ./... - run: GOARCH=386 go test ./cmd/prometheus @@ -46,7 +48,7 @@ jobs: # The go version in this image should be N-1 wrt test_go. 
image: quay.io/prometheus/golang-builder:1.22-base steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - run: make build # Don't run NPM build; don't run race-detector. - run: make test GO_ONLY=1 test-flags="" @@ -60,8 +62,8 @@ jobs: image: quay.io/prometheus/golang-builder:1.23-base steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - uses: ./.github/promci/actions/setup_environment with: enable_go: false @@ -77,7 +79,7 @@ jobs: name: Go tests on Windows runs-on: windows-latest steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version: 1.23.x @@ -94,7 +96,7 @@ jobs: container: image: quay.io/prometheus/golang-builder:1.23-base steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - run: go install ./cmd/promtool/. - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest @@ -110,6 +112,8 @@ jobs: if: | !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')) && + !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.')) + && !(github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-')) && !(github.event_name == 'push' && github.event.ref == 'refs/heads/main') @@ -117,8 +121,8 @@ jobs: matrix: thread: [ 0, 1, 2 ] steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - uses: ./.github/promci/actions/build with: promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386" @@ -130,6 +134,8 @@ jobs: if: | (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')) || + (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.')) + || (github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-')) || (github.event_name == 'push' && github.event.ref == 'refs/heads/main') @@ -140,8 +146,8 @@ jobs: # Whenever the Go version is updated here, .promu.yml # should also be updated. 
steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - uses: ./.github/promci/actions/build with: parallelism: 12 @@ -163,7 +169,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Install Go uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: @@ -176,7 +182,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Install Go uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: @@ -202,8 +208,8 @@ jobs: needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all] if: github.event_name == 'push' && github.event.ref == 'refs/heads/main' steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - uses: ./.github/promci/actions/publish_main with: docker_hub_login: ${{ secrets.docker_hub_login }} @@ -214,10 +220,13 @@ jobs: name: Publish release artefacts runs-on: ubuntu-latest needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all] - if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.') + if: | + (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')) + || + (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.')) steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - uses: ./.github/promci/actions/publish_release with: docker_hub_login: ${{ secrets.docker_hub_login }} @@ -231,10 +240,10 @@ jobs: needs: [test_ui, codeql] steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 - name: Install nodejs - uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 + uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4.0.4 with: node-version-file: "web/ui/.nvmrc" registry-url: "https://registry.npmjs.org" @@ -245,17 +254,26 @@ jobs: restore-keys: | ${{ runner.os }}-node- - name: Check libraries version - if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.') - run: ./scripts/ui_release.sh --check-package "$(echo ${{ github.ref_name }}|sed s/v2/v0/)" + if: | + (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')) + || + (github.event_name == 'push' && 
startsWith(github.ref, 'refs/tags/v3.')) + run: ./scripts/ui_release.sh --check-package "$(./scripts/get_module_version.sh ${{ github.ref_name }})" - name: build run: make assets - name: Copy files before publishing libs run: ./scripts/ui_release.sh --copy - name: Publish dry-run libraries - if: "!(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))" + if: | + !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')) + && + !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.')) run: ./scripts/ui_release.sh --publish dry-run - name: Publish libraries - if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.') + if: | + (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')) + || + (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.')) run: ./scripts/ui_release.sh --publish env: # The setup-node action writes an .npmrc file with this env variable diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 89aa2ba29b1..77fbd4dafb7 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -24,15 +24,15 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Initialize CodeQL - uses: github/codeql-action/init@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6 + uses: github/codeql-action/init@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10 with: languages: ${{ matrix.language }} - name: Autobuild - uses: github/codeql-action/autobuild@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6 + uses: github/codeql-action/autobuild@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6 + uses: github/codeql-action/analyze@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10 diff --git a/.github/workflows/container_description.yml b/.github/workflows/container_description.yml index 8ddbc34aebb..144859486d8 100644 --- a/.github/workflows/container_description.yml +++ b/.github/workflows/container_description.yml @@ -18,7 +18,7 @@ jobs: if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. steps: - name: git checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Set docker hub repo name run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV - name: Push README to Dockerhub @@ -40,7 +40,7 @@ jobs: if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. 
steps: - name: git checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Set quay.io org name run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV - name: Set quay.io repo name diff --git a/.github/workflows/fuzzing.yml b/.github/workflows/fuzzing.yml index f3953cb2a48..80356e45bf6 100644 --- a/.github/workflows/fuzzing.yml +++ b/.github/workflows/fuzzing.yml @@ -21,7 +21,7 @@ jobs: fuzz-seconds: 600 dry-run: false - name: Upload Crash - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 if: failure() && steps.build.outcome == 'success' with: name: artifacts diff --git a/.github/workflows/repo_sync.yml b/.github/workflows/repo_sync.yml index 537e9abd84d..aa306c46d05 100644 --- a/.github/workflows/repo_sync.yml +++ b/.github/workflows/repo_sync.yml @@ -13,7 +13,7 @@ jobs: container: image: quay.io/prometheus/golang-builder steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - run: ./scripts/sync_repo_files.sh env: GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }} diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 82cccb2bc13..c63727f7f16 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -21,7 +21,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # tag=v4.1.6 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # tag=v4.2.0 with: persist-credentials: false @@ -37,7 +37,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # tag=v4.3.4 + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # tag=v4.4.0 with: name: SARIF file path: results.sarif @@ -45,6 +45,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@4dd16135b69a43b6c8efb853346f8437d92d3c93 # tag=v3.26.6 + uses: github/codeql-action/upload-sarif@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # tag=v3.26.10 with: sarif_file: results.sarif diff --git a/.gitignore b/.gitignore index 8363604eb32..484cb36a6f7 100644 --- a/.gitignore +++ b/.gitignore @@ -22,10 +22,7 @@ benchmark.txt /documentation/examples/remote_storage/example_write_adapter/example_write_adapter npm_licenses.tar.bz2 -/web/ui/static/react/* -!/web/ui/static/react/static -/web/ui/static/react/static/js/* -!/web/ui/static/react/static/js/*.gz +/web/ui/static #/vendor /.build diff --git a/.golangci.yml b/.golangci.yml index 303cd33d8b0..c512101e1b1 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -23,6 +23,7 @@ linters: - usestdlibvars - whitespace - loggercheck + - sloglint issues: max-issues-per-linter: 0 @@ -100,8 +101,6 @@ linters-settings: - (net/http.ResponseWriter).Write # No need to check for errors on server's shutdown. - (*net/http.Server).Shutdown - # Never check for logger errors. - - (github.com/go-kit/log.Logger).Log # Never check for rollback errors as Rollback() is called when a previous error was detected. 
- (github.com/prometheus/prometheus/storage.Appender).Rollback goimports: @@ -153,14 +152,4 @@ linters-settings: disable: - float-compare - go-require - enable: - - bool-compare - - compares - - empty - - error-is-as - - error-nil - - expected-actual - - len - - require-error - - suite-dont-use-pkg - - suite-extra-assert-call + enable-all: true diff --git a/.promu.yml b/.promu.yml index bedbbf6219f..f3d827683d4 100644 --- a/.promu.yml +++ b/.promu.yml @@ -29,8 +29,6 @@ tarball: # Whenever there are new files to include in the tarball, # remember to make sure the new files will be generated after `make build`. files: - - consoles - - console_libraries - documentation/examples/prometheus.yml - LICENSE - NOTICE diff --git a/CHANGELOG.md b/CHANGELOG.md index 19af3f460fc..83d6d4926c8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,9 +2,73 @@ ## unreleased -## 2.55.1 / 2024-01-04 - -* [BUGFIX] `round()` function did not remove `__name__` label. #15250 +## 3.0.1 / 2024-11-28 + +The first bug fix release for Prometheus 3. + +* [BUGFIX] Promql: Make subqueries left open. #15431 +* [BUGFIX] Fix memory leak when query log is enabled. #15434 +* [BUGFIX] Support utf8 names on /v1/label/:name/values endpoint. #15399 + +## 3.0.0 / 2024-11-14 + +This release includes new features such as a brand new UI and UTF-8 support enabled by default. As this marks the first new major version in seven years, several breaking changes are introduced. The breaking changes are mainly around the removal of deprecated feature flags and CLI arguments, and the full list can be found below. For users who want to upgrade, we recommend reading through our [migration guide](https://prometheus.io/docs/prometheus/3.0/migration/). + +* [CHANGE] Set the `GOMAXPROCS` variable automatically to match the Linux CPU quota. Use `--no-auto-gomaxprocs` to disable it. The `auto-gomaxprocs` feature flag was removed. #15376 +* [CHANGE] Set the `GOMEMLIMIT` variable automatically to match the Linux container memory limit. Use `--no-auto-gomemlimit` to disable it. The `auto-gomemlimit` feature flag was removed. #15373 +* [CHANGE] Scraping: Remove implicit fallback to the Prometheus text format in case of invalid/missing Content-Type and fail the scrape instead. Add ability to specify a `fallback_scrape_protocol` in the scrape config. #15136 +* [CHANGE] Remote-write: default enable_http2 to false. #15219 +* [CHANGE] Scraping: normalize "le" and "quantile" label values upon ingestion. #15164 +* [CHANGE] Scraping: config `scrape_classic_histograms` was renamed to `always_scrape_classic_histograms`. #15178 +* [CHANGE] Config: remove expand-external-labels flag, expand external labels env vars by default. #14657 +* [CHANGE] Disallow configuring AM with the v1 API. #13883 +* [CHANGE] regexp `.` now matches all characters (performance improvement). #14505 +* [CHANGE] `holt_winters` is now called `double_exponential_smoothing` and moves behind the [experimental-promql-functions feature flag](https://prometheus.io/docs/prometheus/latest/feature_flags/#experimental-promql-functions). #14930 +* [CHANGE] API: The OTLP receiver endpoint can now be enabled using `--web.enable-otlp-receiver` instead of `--enable-feature=otlp-write-receiver`. #14894 +* [CHANGE] Prometheus will not add or remove port numbers from the target address. `no-default-scrape-port` feature flag removed. #14160 +* [CHANGE] Logging: the format of log lines has changed a little, along with the adoption of Go's Structured Logging package. 
#14906 +* [CHANGE] Don't create extra `_created` timeseries if feature-flag `created-timestamp-zero-ingestion` is enabled. #14738 +* [CHANGE] Float literals and time durations being the same is now a stable feature. #15111 +* [CHANGE] UI: The old web UI has been replaced by a completely new one that is less cluttered and adds a few new features (PromLens-style tree view, better metrics explorer, "Explain" tab). However, it is still missing some features of the old UI (notably, exemplar display and heatmaps). To switch back to the old UI, you can use the feature flag `--enable-feature=old-ui` for the time being. #14872 +* [CHANGE] PromQL: Range selectors and the lookback delta are now left-open, i.e. a sample coinciding with the lower time limit is excluded rather than included. #13904 +* [CHANGE] Kubernetes SD: Remove support for `discovery.k8s.io/v1beta1` API version of EndpointSlice. This version is no longer served as of Kubernetes v1.25. #14365 +* [CHANGE] Kubernetes SD: Remove support for `networking.k8s.io/v1beta1` API version of Ingress. This version is no longer served as of Kubernetes v1.22. #14365 +* [CHANGE] UTF-8: Enable UTF-8 support by default. Prometheus now allows all UTF-8 characters in metric and label names. The corresponding `utf8-names` feature flag has been removed. #14705 +* [CHANGE] Console: Remove example files for the console feature. Users can continue using the console feature by supplying their own JavaScript and templates. #14807 +* [CHANGE] SD: Enable the new service discovery manager by default. This SD manager does not restart unchanged discoveries upon reloading. This makes reloads faster and reduces pressure on service discoveries' sources. The corresponding `new-service-discovery-manager` feature flag has been removed. #14770 +* [CHANGE] Agent mode has been promoted to stable. The feature flag `agent` has been removed. To run Prometheus in Agent mode, use the new `--agent` cmdline arg instead. #14747 +* [CHANGE] Remove deprecated `remote-write-receiver`, `promql-at-modifier`, and `promql-negative-offset` feature flags. #13456, #14526 +* [CHANGE] Remove deprecated `storage.tsdb.allow-overlapping-blocks`, `alertmanager.timeout`, and `storage.tsdb.retention` flags. #14640, #14643 +* [FEATURE] OTLP receiver: Ability to skip UTF-8 normalization using `otlp.translation_strategy = NoUTF8EscapingWithSuffixes` configuration option. #15384 +* [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. #14769 +* [ENHANCEMENT] Scraping, rules: handle targets reappearing, or rules moving group, when out-of-order is enabled. #14710 +* [ENHANCEMENT] Tools: add debug printouts to promtool rules unit testing. #15196 +* [ENHANCEMENT] Scraping: support Created-Timestamp feature on native histograms. #14694 +* [ENHANCEMENT] UI: Many fixes and improvements. #14898, #14899, #14907, #14908, #14912, #14913, #14914, #14931, #14940, #14945, #14946, #14972, #14981, #14982, #14994, #15096 +* [ENHANCEMENT] UI: Web UI now displays notifications, e.g. when starting up and shutting down. #15082 +* [ENHANCEMENT] PromQL: Introduce exponential interpolation for native histograms. #14677 +* [ENHANCEMENT] TSDB: Add support for ingestion of out-of-order native histogram samples. #14850, #14546 +* [ENHANCEMENT] Alerts: remove metrics for removed Alertmanagers. #13909 +* [ENHANCEMENT] Kubernetes SD: Support sidecar containers in endpoint discovery. #14929 +* [ENHANCEMENT] Consul SD: Support catalog filters. 
#11224 +* [ENHANCEMENT] Move AM discovery page from "Monitoring status" to "Server status". #14875 +* [PERF] TSDB: Parallelize deletion of postings after head compaction. #14975 +* [PERF] TSDB: Chunk encoding: shorten some write sequences. #14932 +* [PERF] TSDB: Grow postings by doubling. #14721 +* [PERF] Relabeling: Optimize adding a constant label pair. #12180 +* [BUGFIX] Scraping: Don't log errors on empty scrapes. #15357 +* [BUGFIX] UI: fix selector / series formatting for empty metric names. #15341 +* [BUGFIX] PromQL: Fix stddev+stdvar aggregations to always ignore native histograms. #14941 +* [BUGFIX] PromQL: Fix stddev+stdvar aggregations to treat Infinity consistently. #14941 +* [BUGFIX] OTLP receiver: Preserve colons when generating metric names in suffix adding mode (this mode is always enabled, unless one uses Prometheus as a library). #15251 +* [BUGFIX] Scraping: Unit was missing when using protobuf format. #15095 +* [BUGFIX] PromQL: Only return "possible non-counter" annotation when `rate` returns points. #14910 +* [BUGFIX] TSDB: Chunks could have one unnecessary zero byte at the end. #14854 +* [BUGFIX] "superfluous response.WriteHeader call" messages in log. #14884 +* [BUGFIX] PromQL: Unary negation of native histograms. #14821 +* [BUGFIX] PromQL: Handle stale marker in native histogram series (e.g. if series goes away and comes back). #15025 +* [BUGFIX] Autoreload: Reload invalid yaml files. #14947 +* [BUGFIX] Scrape: Do not override target parameter labels with config params. #11029 ## 2.55.0 / 2024-10-22 @@ -137,7 +201,7 @@ This release changes the default for GOGC, the Go runtime control for the trade- * [ENHANCEMENT] TSDB: Pause regular block compactions if the head needs to be compacted (prioritize head as it increases memory consumption). #13754 * [ENHANCEMENT] Observability: Improved logging during signal handling termination. #13772 * [ENHANCEMENT] Observability: All log lines for drop series use "num_dropped" key consistently. #13823 -* [ENHANCEMENT] Observability: Log chunk snapshot and mmaped chunk replay duration during WAL replay. #13838 +* [ENHANCEMENT] Observability: Log chunk snapshot and mmapped chunk replay duration during WAL replay. #13838 * [ENHANCEMENT] Observability: Log if the block is being created from WBL during compaction. #13846 * [BUGFIX] PromQL: Fix inaccurate sample number statistic when querying histograms. #13667 * [BUGFIX] PromQL: Fix `histogram_stddev` and `histogram_stdvar` for cases where the histogram has negative buckets. #13852 @@ -674,7 +738,7 @@ The binaries published with this release are built with Go1.17.8 to avoid [CVE-2 ## 2.33.0 / 2022-01-29 -* [CHANGE] PromQL: Promote negative offset and `@` modifer to stable features. #10121 +* [CHANGE] PromQL: Promote negative offset and `@` modifier to stable features. #10121 * [CHANGE] Web: Promote remote-write-receiver to stable. #10119 * [FEATURE] Config: Add `stripPort` template function. #10002 * [FEATURE] Promtool: Add cardinality analysis to `check metrics`, enabled by flag `--extended`. #10045 @@ -911,7 +975,7 @@ This vulnerability has been reported by Aaron Devaney from MDSec. * [ENHANCEMENT] Templating: Enable parsing strings in `humanize` functions. #8682 * [BUGFIX] UI: Provide errors instead of blank page on TSDB Status Page. #8654 #8659 * [BUGFIX] TSDB: Do not panic when writing very large records to the WAL. #8790 -* [BUGFIX] TSDB: Avoid panic when mmaped memory is referenced after the file is closed. 
#8723 +* [BUGFIX] TSDB: Avoid panic when mmapped memory is referenced after the file is closed. #8723 * [BUGFIX] Scaleway Discovery: Fix nil pointer dereference. #8737 * [BUGFIX] Consul Discovery: Restart no longer required after config update with no targets. #8766 @@ -1837,7 +1901,7 @@ information, read the announcement blog post and migration guide. ## 1.7.0 / 2017-06-06 * [CHANGE] Compress remote storage requests and responses with unframed/raw snappy. -* [CHANGE] Properly ellide secrets in config. +* [CHANGE] Properly elide secrets in config. * [FEATURE] Add OpenStack service discovery. * [FEATURE] Add ability to limit Kubernetes service discovery to certain namespaces. * [FEATURE] Add metric for discovered number of Alertmanagers. diff --git a/Dockerfile b/Dockerfile index b47f77dcd69..b96b3b765d1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -8,21 +8,16 @@ ARG OS="linux" COPY .build/${OS}-${ARCH}/prometheus /bin/prometheus COPY .build/${OS}-${ARCH}/promtool /bin/promtool COPY documentation/examples/prometheus.yml /etc/prometheus/prometheus.yml -COPY console_libraries/ /usr/share/prometheus/console_libraries/ -COPY consoles/ /usr/share/prometheus/consoles/ COPY LICENSE /LICENSE COPY NOTICE /NOTICE COPY npm_licenses.tar.bz2 /npm_licenses.tar.bz2 WORKDIR /prometheus -RUN ln -s /usr/share/prometheus/console_libraries /usr/share/prometheus/consoles/ /etc/prometheus/ && \ - chown -R nobody:nobody /etc/prometheus /prometheus +RUN chown -R nobody:nobody /etc/prometheus /prometheus USER nobody EXPOSE 9090 VOLUME [ "/prometheus" ] ENTRYPOINT [ "/bin/prometheus" ] CMD [ "--config.file=/etc/prometheus/prometheus.yml", \ - "--storage.tsdb.path=/prometheus", \ - "--web.console.libraries=/usr/share/prometheus/console_libraries", \ - "--web.console.templates=/usr/share/prometheus/consoles" ] + "--storage.tsdb.path=/prometheus" ] diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 3661ddaa0a1..de3f3c73b76 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -2,7 +2,6 @@ General maintainers: * Bryan Boreham (bjboreham@gmail.com / @bboreham) -* Levi Harrison (levi@leviharrison.dev / @LeviHarrison) * Ayoub Mrini (ayoubmrini424@gmail.com / @machine424) * Julien Pivotto (roidelapluie@prometheus.io / @roidelapluie) @@ -13,13 +12,12 @@ Maintainers for specific parts of the codebase: * `k8s`: Frederic Branczyk ( / @brancz) * `documentation` * `prometheus-mixin`: Matthias Loibl ( / @metalmatze) -* `model/histogram` and other code related to native histograms: Björn Rabenstein ( / @beorn7), +* `model/histogram` and other code related to native histograms: Björn Rabenstein ( / @beorn7), George Krajcsovits ( / @krajorama) * `storage` * `remote`: Callum Styan ( / @cstyan), Bartłomiej Płotka ( / @bwplotka), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Nicolás Pazos ( / @npazosmendez), Alex Greenbank ( / @alexgreenbank) - * `otlptranslator`: Arve Knudsen ( / @aknuds1), Jesús Vázquez ( / @jesusvazquez) + * `otlptranslator`: Arthur Silva Sens ( / @ArthurSens), Arve Knudsen ( / @aknuds1), Jesús Vázquez ( / @jesusvazquez) * `tsdb`: Ganesh Vernekar ( / @codesome), Bartłomiej Płotka ( / @bwplotka), Jesús Vázquez ( / @jesusvazquez) - * `agent`: Robert Fratto ( / @rfratto) * `web` * `ui`: Julius Volz ( / @juliusv) * `module`: Augustin Husson ( @nexucis) diff --git a/Makefile b/Makefile index f2bb3fcb7a5..0b5935de003 100644 --- a/Makefile +++ b/Makefile @@ -30,6 +30,11 @@ include Makefile.common DOCKER_IMAGE_NAME ?= prometheus +# Only build UI if PREBUILT_ASSETS_STATIC_DIR is not set +ifdef PREBUILT_ASSETS_STATIC_DIR + 
SKIP_UI_BUILD = true +endif + .PHONY: update-npm-deps update-npm-deps: @echo ">> updating npm dependencies" @@ -42,13 +47,17 @@ upgrade-npm-deps: .PHONY: ui-bump-version ui-bump-version: - version=$$(sed s/2/0/ < VERSION) && ./scripts/ui_release.sh --bump-version "$${version}" + version=$$(./scripts/get_module_version.sh) && ./scripts/ui_release.sh --bump-version "$${version}" cd web/ui && npm install git add "./web/ui/package-lock.json" "./**/package.json" .PHONY: ui-install ui-install: cd $(UI_PATH) && npm install + # The old React app has been separated from the npm workspaces setup to avoid + # issues with conflicting dependencies. This is a temporary solution until the + # new Mantine-based UI is fully integrated and the old app can be removed. + cd $(UI_PATH)/react-app && npm install .PHONY: ui-build ui-build: @@ -65,10 +74,30 @@ ui-test: .PHONY: ui-lint ui-lint: cd $(UI_PATH) && npm run lint + # The old React app has been separated from the npm workspaces setup to avoid + # issues with conflicting dependencies. This is a temporary solution until the + # new Mantine-based UI is fully integrated and the old app can be removed. + cd $(UI_PATH)/react-app && npm run lint .PHONY: assets +ifndef SKIP_UI_BUILD assets: ui-install ui-build +.PHONY: npm_licenses +npm_licenses: ui-install + @echo ">> bundling npm licenses" + rm -f $(REACT_APP_NPM_LICENSES_TARBALL) npm_licenses + ln -s . npm_licenses + find npm_licenses/$(UI_NODE_MODULES_PATH) -iname "license*" | tar cfj $(REACT_APP_NPM_LICENSES_TARBALL) --files-from=- + rm -f npm_licenses +else +assets: + @echo '>> skipping assets build, pre-built assets provided' + +npm_licenses: + @echo '>> skipping assets npm licenses, pre-built assets provided' +endif + .PHONY: assets-compress assets-compress: assets @echo '>> compressing assets' @@ -117,14 +146,6 @@ else test: check-generated-parser common-test ui-build-module ui-test ui-lint check-go-mod-version endif -.PHONY: npm_licenses -npm_licenses: ui-install - @echo ">> bundling npm licenses" - rm -f $(REACT_APP_NPM_LICENSES_TARBALL) npm_licenses - ln -s . npm_licenses - find npm_licenses/$(UI_NODE_MODULES_PATH) -iname "license*" | tar cfj $(REACT_APP_NPM_LICENSES_TARBALL) --files-from=- - rm -f npm_licenses - .PHONY: tarball tarball: npm_licenses common-tarball diff --git a/Makefile.common b/Makefile.common index 5d3da8f5cf4..7ec3461bc9c 100644 --- a/Makefile.common +++ b/Makefile.common @@ -307,3 +307,9 @@ $(1)_precheck: exit 1; \ fi endef + +govulncheck: install-govulncheck + govulncheck ./... + +install-govulncheck: + command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest diff --git a/README.md b/README.md index df974e1097b..7528147b0ec 100644 --- a/README.md +++ b/README.md @@ -115,7 +115,7 @@ The Makefile provides several targets: Prometheus is bundled with many service discovery plugins. When building Prometheus from source, you can edit the [plugins.yml](./plugins.yml) -file to disable some service discoveries. The file is a yaml-formated list of go +file to disable some service discoveries. The file is a yaml-formatted list of go import path that will be built into the Prometheus binary. After you have changed the file, you diff --git a/RELEASE.md b/RELEASE.md index b978a3c2261..8e78a6a3ec0 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -188,7 +188,7 @@ the Prometheus server, we use major version zero releases for the libraries. 
Tag the new library release via the following commands: ```bash -tag="v$(sed s/2/0/ < VERSION)" +tag="v$(./scripts/get_module_version.sh)" git tag -s "${tag}" -m "${tag}" git push origin "${tag}" ``` diff --git a/VERSION b/VERSION index 0a756ea0a78..cb2b00e4f7a 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.55.1 +3.0.1 diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index b3bcb78b78b..bcfbe24a6ae 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -18,11 +18,11 @@ import ( "context" "errors" "fmt" + "log/slog" "math" "math/bits" "net" "net/http" - _ "net/http/pprof" // Comment this line to disable pprof endpoint. "net/url" "os" "os/signal" @@ -38,8 +38,6 @@ import ( "github.com/KimMachineGun/automemlimit/memlimit" "github.com/alecthomas/kingpin/v2" "github.com/alecthomas/units" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/grafana/regexp" "github.com/mwitkow/go-conntrack" "github.com/oklog/run" @@ -47,8 +45,8 @@ import ( "github.com/prometheus/client_golang/prometheus/collectors" versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version" "github.com/prometheus/common/model" - "github.com/prometheus/common/promlog" - promlogflag "github.com/prometheus/common/promlog/flag" + "github.com/prometheus/common/promslog" + promslogflag "github.com/prometheus/common/promslog/flag" "github.com/prometheus/common/version" toolkit_web "github.com/prometheus/exporter-toolkit/web" "go.uber.org/atomic" @@ -58,8 +56,6 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/discovery/legacymanager" - "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -79,10 +75,50 @@ import ( "github.com/prometheus/prometheus/tsdb/wlog" "github.com/prometheus/prometheus/util/documentcli" "github.com/prometheus/prometheus/util/logging" + "github.com/prometheus/prometheus/util/notifications" prom_runtime "github.com/prometheus/prometheus/util/runtime" "github.com/prometheus/prometheus/web" ) +// klogv1OutputCallDepth is the stack depth where we can find the origin of this call. +const klogv1OutputCallDepth = 6 + +// klogv1DefaultPrefixLength is the length of the log prefix that we have to strip out. +const klogv1DefaultPrefixLength = 53 + +// klogv1Writer is used in SetOutputBySeverity call below to redirect any calls +// to klogv1 to end up in klogv2. +// This is a hack to support klogv1 without use of go-kit/log. It is inspired +// by klog's upstream klogv1/v2 coexistence example: +// https://github.com/kubernetes/klog/blob/main/examples/coexist_klog_v1_and_v2/coexist_klog_v1_and_v2.go +type klogv1Writer struct{} + +// Write redirects klogv1 calls to klogv2. +// This is a hack to support klogv1 without use of go-kit/log. 
It is inspired +// by klog's upstream klogv1/v2 coexistence example: +// https://github.com/kubernetes/klog/blob/main/examples/coexist_klog_v1_and_v2/coexist_klog_v1_and_v2.go +func (kw klogv1Writer) Write(p []byte) (n int, err error) { + if len(p) < klogv1DefaultPrefixLength { + klogv2.InfoDepth(klogv1OutputCallDepth, string(p)) + return len(p), nil + } + + switch p[0] { + case 'I': + klogv2.InfoDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:])) + case 'W': + klogv2.WarningDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:])) + case 'E': + klogv2.ErrorDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:])) + case 'F': + klogv2.FatalDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:])) + default: + klogv2.InfoDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:])) + } + + return len(p), nil +} + var ( appName = "prometheus" @@ -103,6 +139,8 @@ var ( ) func init() { + // This can be removed when the default validation scheme in common is updated. + model.NameValidationScheme = model.UTF8Validation prometheus.MustRegister(versioncollector.NewCollector(strings.ReplaceAll(appName, "-", "_"))) var err error @@ -135,123 +173,109 @@ func agentOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagCla type flagConfig struct { configFile string - agentStoragePath string - serverStoragePath string - notifier notifier.Options - forGracePeriod model.Duration - outageTolerance model.Duration - resendDelay model.Duration - maxConcurrentEvals int64 - web web.Options - scrape scrape.Options - tsdb tsdbOptions - agent agentOptions - lookbackDelta model.Duration - webTimeout model.Duration - queryTimeout model.Duration - queryConcurrency int - queryMaxSamples int - RemoteFlushDeadline model.Duration - nameEscapingScheme string - - featureList []string - memlimitRatio float64 + agentStoragePath string + serverStoragePath string + notifier notifier.Options + forGracePeriod model.Duration + outageTolerance model.Duration + resendDelay model.Duration + maxConcurrentEvals int64 + web web.Options + scrape scrape.Options + tsdb tsdbOptions + agent agentOptions + lookbackDelta model.Duration + webTimeout model.Duration + queryTimeout model.Duration + queryConcurrency int + queryMaxSamples int + RemoteFlushDeadline model.Duration + nameEscapingScheme string + maxNotificationsSubscribers int + + enableAutoReload bool + autoReloadInterval model.Duration + + maxprocsEnable bool + memlimitEnable bool + memlimitRatio float64 + + featureList []string // These options are extracted from featureList // for ease of use. - enableExpandExternalLabels bool - enableNewSDManager bool - enablePerStepStats bool - enableAutoGOMAXPROCS bool - enableAutoGOMEMLIMIT bool - enableConcurrentRuleEval bool + enablePerStepStats bool + enableConcurrentRuleEval bool prometheusURL string corsRegexString string - promlogConfig promlog.Config - promqlEnableDelayedNameRemoval bool + + promslogConfig promslog.Config } // setFeatureListOptions sets the corresponding options from the featureList. -func (c *flagConfig) setFeatureListOptions(logger log.Logger) error { +func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error { for _, f := range c.featureList { opts := strings.Split(f, ",") for _, o := range opts { switch o { - case "remote-write-receiver": - c.web.EnableRemoteWriteReceiver = true - level.Warn(logger).Log("msg", "Remote write receiver enabled via feature flag remote-write-receiver. This is DEPRECATED. 
Use --web.enable-remote-write-receiver.") - case "otlp-write-receiver": - c.web.EnableOTLPWriteReceiver = true - level.Info(logger).Log("msg", "Experimental OTLP write receiver enabled") - case "expand-external-labels": - c.enableExpandExternalLabels = true - level.Info(logger).Log("msg", "Experimental expand-external-labels enabled") case "exemplar-storage": c.tsdb.EnableExemplarStorage = true - level.Info(logger).Log("msg", "Experimental in-memory exemplar storage enabled") + logger.Info("Experimental in-memory exemplar storage enabled") case "memory-snapshot-on-shutdown": c.tsdb.EnableMemorySnapshotOnShutdown = true - level.Info(logger).Log("msg", "Experimental memory snapshot on shutdown enabled") + logger.Info("Experimental memory snapshot on shutdown enabled") case "extra-scrape-metrics": c.scrape.ExtraMetrics = true - level.Info(logger).Log("msg", "Experimental additional scrape metrics enabled") + logger.Info("Experimental additional scrape metrics enabled") case "metadata-wal-records": c.scrape.AppendMetadata = true - level.Info(logger).Log("msg", "Experimental metadata records in WAL enabled, required for remote write 2.0") - case "new-service-discovery-manager": - c.enableNewSDManager = true - level.Info(logger).Log("msg", "Experimental service discovery manager") - case "agent": - agentMode = true - level.Info(logger).Log("msg", "Experimental agent mode enabled.") + logger.Info("Experimental metadata records in WAL enabled, required for remote write 2.0") case "promql-per-step-stats": c.enablePerStepStats = true - level.Info(logger).Log("msg", "Experimental per-step statistics reporting") - case "auto-gomaxprocs": - c.enableAutoGOMAXPROCS = true - level.Info(logger).Log("msg", "Automatically set GOMAXPROCS to match Linux container CPU quota") - case "auto-gomemlimit": - c.enableAutoGOMEMLIMIT = true - level.Info(logger).Log("msg", "Automatically set GOMEMLIMIT to match Linux container or system memory limit") + logger.Info("Experimental per-step statistics reporting") + case "auto-reload-config": + c.enableAutoReload = true + if s := time.Duration(c.autoReloadInterval).Seconds(); s > 0 && s < 1 { + c.autoReloadInterval, _ = model.ParseDuration("1s") + } + logger.Info("Enabled automatic configuration file reloading. Checking for configuration changes every", "interval", c.autoReloadInterval) case "concurrent-rule-eval": c.enableConcurrentRuleEval = true - level.Info(logger).Log("msg", "Experimental concurrent rule evaluation enabled.") - case "no-default-scrape-port": - c.scrape.NoDefaultPort = true - level.Info(logger).Log("msg", "No default port will be appended to scrape targets' addresses.") + logger.Info("Experimental concurrent rule evaluation enabled.") case "promql-experimental-functions": parser.EnableExperimentalFunctions = true - level.Info(logger).Log("msg", "Experimental PromQL functions enabled.") + logger.Info("Experimental PromQL functions enabled.") case "native-histograms": c.tsdb.EnableNativeHistograms = true c.scrape.EnableNativeHistogramsIngestion = true // Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers. config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols - level.Info(logger).Log("msg", "Experimental native histogram support enabled. 
Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols)) + logger.Info("Experimental native histogram support enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols)) + case "ooo-native-histograms": + c.tsdb.EnableOOONativeHistograms = true + logger.Info("Experimental out-of-order native histogram ingestion enabled. This will only take effect if OutOfOrderTimeWindow is > 0 and if EnableNativeHistograms = true") case "created-timestamp-zero-ingestion": c.scrape.EnableCreatedTimestampZeroIngestion = true // Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers. config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols - level.Info(logger).Log("msg", "Experimental created timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols)) + logger.Info("Experimental created timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols)) case "delayed-compaction": c.tsdb.EnableDelayedCompaction = true - level.Info(logger).Log("msg", "Experimental delayed compaction is enabled.") + logger.Info("Experimental delayed compaction is enabled.") case "promql-delayed-name-removal": c.promqlEnableDelayedNameRemoval = true - level.Info(logger).Log("msg", "Experimental PromQL delayed name removal enabled.") - case "utf8-names": - model.NameValidationScheme = model.UTF8Validation - level.Info(logger).Log("msg", "Experimental UTF-8 support enabled") + logger.Info("Experimental PromQL delayed name removal enabled.") case "": continue - case "promql-at-modifier", "promql-negative-offset": - level.Warn(logger).Log("msg", "This option for --enable-feature is now permanently enabled and therefore a no-op.", "option", o) + case "old-ui": + c.web.UseOldUI = true + logger.Info("Serving previous version of the Prometheus web UI.") default: - level.Warn(logger).Log("msg", "Unknown option for --enable-feature", "option", o) + logger.Warn("Unknown option for --enable-feature", "option", o) } } } @@ -265,11 +289,6 @@ func main() { runtime.SetMutexProfileFraction(20) } - var ( - oldFlagRetentionDuration model.Duration - newFlagRetentionDuration model.Duration - ) - // Unregister the default GoCollector, and reregister with our defaults. if prometheus.Unregister(collectors.NewGoCollector()) { prometheus.MustRegister( @@ -290,7 +309,7 @@ func main() { Registerer: prometheus.DefaultRegisterer, Gatherer: prometheus.DefaultGatherer, }, - promlogConfig: promlog.Config{}, + promslogConfig: promslog.Config{}, } a := kingpin.New(filepath.Base(os.Args[0]), "The Prometheus monitoring server").UsageWriter(os.Stdout) @@ -302,9 +321,16 @@ func main() { a.Flag("config.file", "Prometheus configuration file path."). Default("prometheus.yml").StringVar(&cfg.configFile) + a.Flag("config.auto-reload-interval", "Specifies the interval for checking and automatically reloading the Prometheus configuration file upon detecting changes."). 
+ Default("30s").SetValue(&cfg.autoReloadInterval) + a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry. Can be repeated."). Default("0.0.0.0:9090").StringsVar(&cfg.web.ListenAddresses) + a.Flag("auto-gomaxprocs", "Automatically set GOMAXPROCS to match Linux container CPU quota"). + Default("true").BoolVar(&cfg.maxprocsEnable) + a.Flag("auto-gomemlimit", "Automatically set GOMEMLIMIT to match Linux container or system memory limit"). + Default("true").BoolVar(&cfg.memlimitEnable) a.Flag("auto-gomemlimit.ratio", "The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory"). Default("0.9").FloatVar(&cfg.memlimitRatio) @@ -320,6 +346,9 @@ func main() { a.Flag("web.max-connections", "Maximum number of simultaneous connections across all listeners."). Default("512").IntVar(&cfg.web.MaxConnections) + a.Flag("web.max-notifications-subscribers", "Limits the maximum number of subscribers that can concurrently receive live notifications. If the limit is reached, new subscription requests will be denied until existing connections close."). + Default("16").IntVar(&cfg.maxNotificationsSubscribers) + a.Flag("web.external-url", "The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Prometheus. If omitted, relevant URL components will be derived automatically."). PlaceHolder("").StringVar(&cfg.prometheusURL) @@ -346,6 +375,9 @@ func main() { a.Flag("web.remote-write-receiver.accepted-protobuf-messages", fmt.Sprintf("List of the remote write protobuf messages to accept when receiving the remote writes. Supported values: %v", supportedRemoteWriteProtoMsgs.String())). Default(supportedRemoteWriteProtoMsgs.Strings()...).SetValue(rwProtoMsgFlagValue(&cfg.web.AcceptRemoteWriteProtoMsgs)) + a.Flag("web.enable-otlp-receiver", "Enable API endpoint accepting OTLP write requests."). + Default("false").BoolVar(&cfg.web.EnableOTLPWriteReceiver) + a.Flag("web.console.templates", "Path to the console template directory, available at /consoles."). Default("consoles").StringVar(&cfg.web.ConsoleTemplatesPath) @@ -376,11 +408,8 @@ func main() { "Size at which to split the tsdb WAL segment files. Example: 100MB"). Hidden().PlaceHolder("").BytesVar(&cfg.tsdb.WALSegmentSize) - serverOnlyFlag(a, "storage.tsdb.retention", "[DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use \"storage.tsdb.retention.time\" instead."). - SetValue(&oldFlagRetentionDuration) - - serverOnlyFlag(a, "storage.tsdb.retention.time", "How long to retain samples in storage. When this flag is set it overrides \"storage.tsdb.retention\". If neither this flag nor \"storage.tsdb.retention\" nor \"storage.tsdb.retention.size\" is set, the retention time defaults to "+defaultRetentionString+". Units Supported: y, w, d, h, m, s, ms."). - SetValue(&newFlagRetentionDuration) + serverOnlyFlag(a, "storage.tsdb.retention.time", "How long to retain samples in storage. If neither this flag nor \"storage.tsdb.retention.size\" is set, the retention time defaults to "+defaultRetentionString+". Units Supported: y, w, d, h, m, s, ms."). + SetValue(&cfg.tsdb.RetentionDuration) serverOnlyFlag(a, "storage.tsdb.retention.size", "Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: \"512MB\". 
Based on powers-of-2, so 1KB is 1024B."). BytesVar(&cfg.tsdb.MaxBytes) @@ -388,11 +417,6 @@ func main() { serverOnlyFlag(a, "storage.tsdb.no-lockfile", "Do not create lockfile in data directory."). Default("false").BoolVar(&cfg.tsdb.NoLockfile) - // TODO: Remove in Prometheus 3.0. - var b bool - serverOnlyFlag(a, "storage.tsdb.allow-overlapping-blocks", "[DEPRECATED] This flag has no effect. Overlapping blocks are enabled by default now."). - Default("true").Hidden().BoolVar(&b) - serverOnlyFlag(a, "storage.tsdb.allow-overlapping-compaction", "Allow compaction of overlapping blocks. If set to false, TSDB stops vertical compaction and leaves overlapping blocks there. The use case is to let another component handle the compaction of overlapping blocks."). Default("true").Hidden().BoolVar(&cfg.tsdb.EnableOverlappingCompaction) @@ -408,6 +432,9 @@ func main() { serverOnlyFlag(a, "storage.tsdb.samples-per-chunk", "Target number of samples per chunk."). Default("120").Hidden().IntVar(&cfg.tsdb.SamplesPerChunk) + serverOnlyFlag(a, "storage.tsdb.delayed-compaction.max-percent", "Sets the upper limit for the random compaction delay, specified as a percentage of the head chunk range. 100 means the compaction can be delayed by up to the entire head chunk range. Only effective when the delayed-compaction feature flag is enabled."). + Default("10").Hidden().IntVar(&cfg.tsdb.CompactionDelayMaxPercent) + agentOnlyFlag(a, "storage.agent.path", "Base path for metrics storage."). Default("data-agent/").StringVar(&cfg.agentStoragePath) @@ -472,9 +499,6 @@ func main() { serverOnlyFlag(a, "alertmanager.drain-notification-queue-on-shutdown", "Send any outstanding Alertmanager notifications when shutting down. If false, any outstanding Alertmanager notifications will be dropped when shutting down."). Default("true").BoolVar(&cfg.notifier.DrainOnShutdown) - // TODO: Remove in Prometheus 3.0. - alertmanagerTimeout := a.Flag("alertmanager.timeout", "[DEPRECATED] This flag has no effect.").Hidden().String() - serverOnlyFlag(a, "query.lookback-delta", "The maximum lookback duration for retrieving metrics during expression evaluations and federation."). Default("5m").SetValue(&cfg.lookbackDelta) @@ -490,12 +514,12 @@ func main() { a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates."). Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval) - a.Flag("scrape.name-escaping-scheme", `Method for escaping legacy invalid names when sending to Prometheus that does not support UTF-8. Can be one of "values", "underscores", or "dots".`).Default(scrape.DefaultNameEscapingScheme.String()).StringVar(&cfg.nameEscapingScheme) - - a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomaxprocs, auto-gomemlimit, concurrent-rule-eval, created-timestamp-zero-ingestion, delayed-compaction, exemplar-storage, expand-external-labels, extra-scrape-metrics, memory-snapshot-on-shutdown, native-histograms, new-service-discovery-manager, no-default-scrape-port, otlp-write-receiver, promql-experimental-functions, promql-delayed-name-removal, promql-per-step-stats, remote-write-receiver (DEPRECATED), utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). + a.Flag("enable-feature", "Comma separated feature names to enable. 
Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). Default("").StringsVar(&cfg.featureList) - promlogflag.AddFlags(a, &cfg.promlogConfig) + a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode) + + promslogflag.AddFlags(a, &cfg.promslogConfig) a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(ctx *kingpin.ParseContext) error { if err := documentcli.GenerateMarkdown(a.Model(), os.Stdout); err != nil { @@ -513,7 +537,13 @@ func main() { os.Exit(2) } - logger := promlog.New(&cfg.promlogConfig) + logger := promslog.New(&cfg.promslogConfig) + slog.SetDefault(logger) + + notifs := notifications.NewNotifications(cfg.maxNotificationsSubscribers, prometheus.DefaultRegisterer) + cfg.web.NotificationsSub = notifs.Sub + cfg.web.NotificationsGetter = notifs.Get + notifs.AddNotification(notifications.StartingUp) if err := cfg.setFeatureListOptions(logger); err != nil { fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing feature list: %w", err)) @@ -561,18 +591,14 @@ func main() { os.Exit(2) } - if *alertmanagerTimeout != "" { - level.Warn(logger).Log("msg", "The flag --alertmanager.timeout has no effect and will be removed in the future.") - } - // Throw error for invalid config before starting other components. var cfgFile *config.Config - if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, false, log.NewNopLogger()); err != nil { + if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, promslog.NewNopLogger()); err != nil { absPath, pathErr := filepath.Abs(cfg.configFile) if pathErr != nil { absPath = cfg.configFile } - level.Error(logger).Log("msg", fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "file", absPath, "err", err) + logger.Error(fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "file", absPath, "err", err) os.Exit(2) } if _, err := cfgFile.GetScrapeConfigs(); err != nil { @@ -580,7 +606,7 @@ func main() { if pathErr != nil { absPath = cfg.configFile } - level.Error(logger).Log("msg", fmt.Sprintf("Error loading scrape config files from config (--config.file=%q)", cfg.configFile), "file", absPath, "err", err) + logger.Error(fmt.Sprintf("Error loading scrape config files from config (--config.file=%q)", cfg.configFile), "file", absPath, "err", err) os.Exit(2) } if cfg.tsdb.EnableExemplarStorage { @@ -611,20 +637,9 @@ func main() { cfg.web.RoutePrefix = "/" + strings.Trim(cfg.web.RoutePrefix, "/") if !agentMode { - // Time retention settings. - if oldFlagRetentionDuration != 0 { - level.Warn(logger).Log("deprecation_notice", "'storage.tsdb.retention' flag is deprecated use 'storage.tsdb.retention.time' instead.") - cfg.tsdb.RetentionDuration = oldFlagRetentionDuration - } - - // When the new flag is set it takes precedence. 
- if newFlagRetentionDuration != 0 { - cfg.tsdb.RetentionDuration = newFlagRetentionDuration - } - if cfg.tsdb.RetentionDuration == 0 && cfg.tsdb.MaxBytes == 0 { cfg.tsdb.RetentionDuration = defaultRetentionDuration - level.Info(logger).Log("msg", "No time or size retention was set so using the default time retention", "duration", defaultRetentionDuration) + logger.Info("No time or size retention was set so using the default time retention", "duration", defaultRetentionDuration) } // Check for overflows. This limits our max retention to 100y. @@ -634,7 +649,7 @@ func main() { panic(err) } cfg.tsdb.RetentionDuration = y - level.Warn(logger).Log("msg", "Time retention value is too high. Limiting to: "+y.String()) + logger.Warn("Time retention value is too high. Limiting to: " + y.String()) } // Max block size settings. @@ -650,16 +665,19 @@ func main() { cfg.tsdb.MaxBlockDuration = maxBlockDuration } + + // Delayed compaction checks + if cfg.tsdb.EnableDelayedCompaction && (cfg.tsdb.CompactionDelayMaxPercent > 100 || cfg.tsdb.CompactionDelayMaxPercent <= 0) { + logger.Warn("The --storage.tsdb.delayed-compaction.max-percent should have a value between 1 and 100. Using default", "default", tsdb.DefaultCompactionDelayMaxPercent) + cfg.tsdb.CompactionDelayMaxPercent = tsdb.DefaultCompactionDelayMaxPercent + } } noStepSubqueryInterval := &safePromQLNoStepSubqueryInterval{} noStepSubqueryInterval.Set(config.DefaultGlobalConfig.EvaluationInterval) - // Above level 6, the k8s client would log bearer tokens in clear-text. - klog.ClampLevel(6) - klog.SetLogger(log.With(logger, "component", "k8s_client_runtime")) - klogv2.ClampLevel(6) - klogv2.SetLogger(log.With(logger, "component", "k8s_client_runtime")) + klogv2.SetSlogLogger(logger.With("component", "k8s_client_runtime")) + klog.SetOutputBySeverity("INFO", klogv1Writer{}) modeAppName := "Prometheus Server" mode := "server" @@ -668,20 +686,22 @@ func main() { mode = "agent" } - level.Info(logger).Log("msg", "Starting "+modeAppName, "mode", mode, "version", version.Info()) + logger.Info("Starting "+modeAppName, "mode", mode, "version", version.Info()) if bits.UintSize < 64 { - level.Warn(logger).Log("msg", "This Prometheus binary has not been compiled for a 64-bit architecture. Due to virtual memory constraints of 32-bit systems, it is highly recommended to switch to a 64-bit binary of Prometheus.", "GOARCH", runtime.GOARCH) + logger.Warn("This Prometheus binary has not been compiled for a 64-bit architecture. 
Due to virtual memory constraints of 32-bit systems, it is highly recommended to switch to a 64-bit binary of Prometheus.", "GOARCH", runtime.GOARCH) } - level.Info(logger).Log("build_context", version.BuildContext()) - level.Info(logger).Log("host_details", prom_runtime.Uname()) - level.Info(logger).Log("fd_limits", prom_runtime.FdLimits()) - level.Info(logger).Log("vm_limits", prom_runtime.VMLimits()) + logger.Info("operational information", + "build_context", version.BuildContext(), + "host_details", prom_runtime.Uname(), + "fd_limits", prom_runtime.FdLimits(), + "vm_limits", prom_runtime.VMLimits(), + ) var ( localStorage = &readyStorage{stats: tsdb.NewDBStats()} scraper = &readyScrapeManager{} - remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.scrape.AppendMetadata) + remoteStorage = remote.NewStorage(logger.With("component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.scrape.AppendMetadata) fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage) ) @@ -689,12 +709,12 @@ func main() { ctxWeb, cancelWeb = context.WithCancel(context.Background()) ctxRule = context.Background() - notifierManager = notifier.NewManager(&cfg.notifier, log.With(logger, "component", "notifier")) + notifierManager = notifier.NewManager(&cfg.notifier, logger.With("component", "notifier")) ctxScrape, cancelScrape = context.WithCancel(context.Background()) ctxNotify, cancelNotify = context.WithCancel(context.Background()) - discoveryManagerScrape discoveryManager - discoveryManagerNotify discoveryManager + discoveryManagerScrape *discovery.Manager + discoveryManagerNotify *discovery.Manager ) // Kubernetes client metrics are used by Kubernetes SD. @@ -704,63 +724,37 @@ func main() { // they are not specific to an SD instance. 
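Most of the churn in this hunk is the mechanical migration from go-kit/log (`level.Info(logger).Log("msg", ...)`) to the standard library's `log/slog`, with loggers now constructed through `github.com/prometheus/common/promslog` and component scoping done via `logger.With`. A minimal sketch of the before/after call pattern — the message and error text below are illustrative, not taken from this diff:

```go
package main

import (
	"github.com/prometheus/common/promslog"
)

func main() {
	// promslog.New returns a *slog.Logger; the zero-value Config uses defaults.
	logger := promslog.New(&promslog.Config{})

	// go-kit style (old): level.Info(logger).Log("msg", "Starting Prometheus Server", "mode", "server")
	// slog style (new):   the message is positional, followed by key/value pairs.
	logger.Info("Starting Prometheus Server", "mode", "server")

	// Component-scoped loggers swap log.With(logger, "component", ...) for logger.With(...).
	scrapeLogger := logger.With("component", "scrape manager")
	scrapeLogger.Warn("example warning", "err", "something went wrong")
}
```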
err = discovery.RegisterK8sClientMetricsWithPrometheus(prometheus.DefaultRegisterer) if err != nil { - level.Error(logger).Log("msg", "failed to register Kubernetes client metrics", "err", err) + logger.Error("failed to register Kubernetes client metrics", "err", err) os.Exit(1) } sdMetrics, err := discovery.CreateAndRegisterSDMetrics(prometheus.DefaultRegisterer) if err != nil { - level.Error(logger).Log("msg", "failed to register service discovery metrics", "err", err) + logger.Error("failed to register service discovery metrics", "err", err) os.Exit(1) } - if cfg.enableNewSDManager { - { - discMgr := discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape")) - if discMgr == nil { - level.Error(logger).Log("msg", "failed to create a discovery manager scrape") - os.Exit(1) - } - discoveryManagerScrape = discMgr - } - - { - discMgr := discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify")) - if discMgr == nil { - level.Error(logger).Log("msg", "failed to create a discovery manager notify") - os.Exit(1) - } - discoveryManagerNotify = discMgr - } - } else { - { - discMgr := legacymanager.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, legacymanager.Name("scrape")) - if discMgr == nil { - level.Error(logger).Log("msg", "failed to create a discovery manager scrape") - os.Exit(1) - } - discoveryManagerScrape = discMgr - } + discoveryManagerScrape = discovery.NewManager(ctxScrape, logger.With("component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape")) + if discoveryManagerScrape == nil { + logger.Error("failed to create a discovery manager scrape") + os.Exit(1) + } - { - discMgr := legacymanager.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, legacymanager.Name("notify")) - if discMgr == nil { - level.Error(logger).Log("msg", "failed to create a discovery manager notify") - os.Exit(1) - } - discoveryManagerNotify = discMgr - } + discoveryManagerNotify = discovery.NewManager(ctxNotify, logger.With("component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify")) + if discoveryManagerNotify == nil { + logger.Error("failed to create a discovery manager notify") + os.Exit(1) } scrapeManager, err := scrape.NewManager( &cfg.scrape, - log.With(logger, "component", "scrape manager"), - func(s string) (log.Logger, error) { return logging.NewJSONFileLogger(s) }, + logger.With("component", "scrape manager"), + logging.NewJSONFileLogger, fanoutStorage, prometheus.DefaultRegisterer, ) if err != nil { - level.Error(logger).Log("msg", "failed to create a scrape manager", "err", err) + logger.Error("failed to create a scrape manager", "err", err) os.Exit(1) } @@ -771,16 +765,16 @@ func main() { ruleManager *rules.Manager ) - if cfg.enableAutoGOMAXPROCS { + if cfg.maxprocsEnable { l := func(format string, a ...interface{}) { - level.Info(logger).Log("component", "automaxprocs", "msg", fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...)) + logger.Info(fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...), "component", "automaxprocs") } if _, err := maxprocs.Set(maxprocs.Logger(l)); err != nil { - level.Warn(logger).Log("component", "automaxprocs", "msg", "Failed to 
set GOMAXPROCS automatically", "err", err) + logger.Warn("Failed to set GOMAXPROCS automatically", "component", "automaxprocs", "err", err) } } - if cfg.enableAutoGOMEMLIMIT { + if cfg.memlimitEnable { if _, err := memlimit.SetGoMemLimitWithOpts( memlimit.WithRatio(cfg.memlimitRatio), memlimit.WithProvider( @@ -790,17 +784,17 @@ func main() { ), ), ); err != nil { - level.Warn(logger).Log("component", "automemlimit", "msg", "Failed to set GOMEMLIMIT automatically", "err", err) + logger.Warn("automemlimit", "msg", "Failed to set GOMEMLIMIT automatically", "err", err) } } if !agentMode { opts := promql.EngineOpts{ - Logger: log.With(logger, "component", "query engine"), + Logger: logger.With("component", "query engine"), Reg: prometheus.DefaultRegisterer, MaxSamples: cfg.queryMaxSamples, Timeout: time.Duration(cfg.queryTimeout), - ActiveQueryTracker: promql.NewActiveQueryTracker(localStoragePath, cfg.queryConcurrency, log.With(logger, "component", "activeQueryTracker")), + ActiveQueryTracker: promql.NewActiveQueryTracker(localStoragePath, cfg.queryConcurrency, logger.With("component", "activeQueryTracker")), LookbackDelta: time.Duration(cfg.lookbackDelta), NoStepSubqueryIntervalFn: noStepSubqueryInterval.Get, // EnableAtModifier and EnableNegativeOffset have to be @@ -821,7 +815,7 @@ func main() { Context: ctxRule, ExternalURL: cfg.web.ExternalURL, Registerer: prometheus.DefaultRegisterer, - Logger: log.With(logger, "component", "rule manager"), + Logger: logger.With("component", "rule manager"), OutageTolerance: time.Duration(cfg.outageTolerance), ForGracePeriod: time.Duration(cfg.forGracePeriod), ResendDelay: time.Duration(cfg.resendDelay), @@ -872,7 +866,7 @@ func main() { } // Depends on cfg.web.ScrapeManager so needs to be after cfg.web.ScrapeManager = scrapeManager. - webHandler := web.New(log.With(logger, "component", "web"), &cfg.web) + webHandler := web.New(logger.With("component", "web"), &cfg.web) // Monitor outgoing connections on default transport with conntrack. http.DefaultTransport.(*http.Transport).DialContext = conntrack.NewDialContextFunc( @@ -999,18 +993,18 @@ func main() { listeners, err := webHandler.Listeners() if err != nil { - level.Error(logger).Log("msg", "Unable to start web listeners", "err", err) + logger.Error("Unable to start web listener", "err", err) if err := queryEngine.Close(); err != nil { - level.Warn(logger).Log("msg", "Closing query engine failed", "err", err) + logger.Warn("Closing query engine failed", "err", err) } os.Exit(1) } err = toolkit_web.Validate(*webConfig) if err != nil { - level.Error(logger).Log("msg", "Unable to validate web configuration file", "err", err) + logger.Error("Unable to validate web configuration file", "err", err) if err := queryEngine.Close(); err != nil { - level.Warn(logger).Log("msg", "Closing query engine failed", "err", err) + logger.Warn("Closing query engine failed", "err", err) } os.Exit(1) } @@ -1026,21 +1020,22 @@ func main() { // Don't forget to release the reloadReady channel so that waiting blocks can exit normally. 
select { case sig := <-term: - level.Warn(logger).Log("msg", "Received an OS signal, exiting gracefully...", "signal", sig.String()) + logger.Warn("Received an OS signal, exiting gracefully...", "signal", sig.String()) reloadReady.Close() case <-webHandler.Quit(): - level.Warn(logger).Log("msg", "Received termination request via web service, exiting gracefully...") + logger.Warn("Received termination request via web service, exiting gracefully...") case <-cancel: reloadReady.Close() } if err := queryEngine.Close(); err != nil { - level.Warn(logger).Log("msg", "Closing query engine failed", "err", err) + logger.Warn("Closing query engine failed", "err", err) } return nil }, func(err error) { close(cancel) - webHandler.SetReady(false) + webHandler.SetReady(web.Stopping) + notifs.AddNotification(notifications.ShuttingDown) }, ) } @@ -1049,11 +1044,11 @@ func main() { g.Add( func() error { err := discoveryManagerScrape.Run() - level.Info(logger).Log("msg", "Scrape discovery manager stopped") + logger.Info("Scrape discovery manager stopped") return err }, func(err error) { - level.Info(logger).Log("msg", "Stopping scrape discovery manager...") + logger.Info("Stopping scrape discovery manager...") cancelScrape() }, ) @@ -1063,11 +1058,11 @@ func main() { g.Add( func() error { err := discoveryManagerNotify.Run() - level.Info(logger).Log("msg", "Notify discovery manager stopped") + logger.Info("Notify discovery manager stopped") return err }, func(err error) { - level.Info(logger).Log("msg", "Stopping notify discovery manager...") + logger.Info("Stopping notify discovery manager...") cancelNotify() }, ) @@ -1096,7 +1091,7 @@ func main() { <-reloadReady.C err := scrapeManager.Run(discoveryManagerScrape.SyncCh()) - level.Info(logger).Log("msg", "Scrape manager stopped") + logger.Info("Scrape manager stopped") return err }, func(err error) { @@ -1104,7 +1099,7 @@ func main() { // so that it doesn't try to write samples to a closed storage. // We should also wait for rule manager to be fully stopped to ensure // we don't trigger any false positive alerts for rules using absent(). 
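These `g.Add` pairs all follow the `github.com/oklog/run` actor convention that `main()` is built around (and that the later "Error running goroutines from run.Group" message refers to): the first function runs and blocks until the actor is finished, the second is invoked to interrupt it once any actor in the group returns. A stripped-down, self-contained sketch of that pattern with placeholder actors, not Prometheus' real components:

```go
package main

import (
	"context"
	"log/slog"
	"time"

	"github.com/oklog/run"
)

func main() {
	logger := slog.Default()
	ctx, cancel := context.WithCancel(context.Background())

	var g run.Group

	// A placeholder long-running actor: blocks until it is interrupted.
	g.Add(
		func() error {
			<-ctx.Done()
			logger.Info("Placeholder actor stopped")
			return nil
		},
		func(err error) {
			logger.Info("Stopping placeholder actor...")
			cancel()
		},
	)

	// A second actor that finishes on its own after a moment; when it returns,
	// run.Group interrupts every other actor in the group.
	g.Add(
		func() error {
			time.Sleep(100 * time.Millisecond)
			return nil
		},
		func(error) {},
	)

	if err := g.Run(); err != nil {
		logger.Error("Error running goroutines from run.Group", "err", err)
	}
}
```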
- level.Info(logger).Log("msg", "Stopping scrape manager...") + logger.Info("Stopping scrape manager...") scrapeManager.Stop() }, ) @@ -1130,6 +1125,23 @@ func main() { hup := make(chan os.Signal, 1) signal.Notify(hup, syscall.SIGHUP) cancel := make(chan struct{}) + + var checksum string + if cfg.enableAutoReload { + checksum, err = config.GenerateChecksum(cfg.configFile) + if err != nil { + logger.Error("Failed to generate initial checksum for configuration file", "err", err) + } + } + + callback := func(success bool) { + if success { + notifs.DeleteNotification(notifications.ConfigurationUnsuccessful) + return + } + notifs.AddNotification(notifications.ConfigurationUnsuccessful) + } + g.Add( func() error { <-reloadReady.C @@ -1137,15 +1149,44 @@ func main() { for { select { case <-hup: - if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil { - level.Error(logger).Log("msg", "Error reloading config", "err", err) + if err := reloadConfig(cfg.configFile, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil { + logger.Error("Error reloading config", "err", err) + } else if cfg.enableAutoReload { + checksum, err = config.GenerateChecksum(cfg.configFile) + if err != nil { + logger.Error("Failed to generate checksum during configuration reload", "err", err) + } } case rc := <-webHandler.Reload(): - if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil { - level.Error(logger).Log("msg", "Error reloading config", "err", err) + if err := reloadConfig(cfg.configFile, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil { + logger.Error("Error reloading config", "err", err) rc <- err } else { rc <- nil + if cfg.enableAutoReload { + checksum, err = config.GenerateChecksum(cfg.configFile) + if err != nil { + logger.Error("Failed to generate checksum during configuration reload", "err", err) + } + } + } + case <-time.Tick(time.Duration(cfg.autoReloadInterval)): + if !cfg.enableAutoReload { + continue + } + currentChecksum, err := config.GenerateChecksum(cfg.configFile) + if err != nil { + checksum = currentChecksum + logger.Error("Failed to generate checksum during configuration reload", "err", err) + } else if currentChecksum == checksum { + continue + } + logger.Info("Configuration file change detected, reloading the configuration.") + + if err := reloadConfig(cfg.configFile, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil { + logger.Error("Error reloading config", "err", err) + } else { + checksum = currentChecksum } case <-cancel: return nil @@ -1172,14 +1213,15 @@ func main() { return nil } - if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil { + if err := reloadConfig(cfg.configFile, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, func(bool) {}, reloaders...); err != nil { return fmt.Errorf("error loading config from %q: %w", cfg.configFile, err) } reloadReady.Close() - webHandler.SetReady(true) - level.Info(logger).Log("msg", "Server is ready to receive web requests.") + webHandler.SetReady(web.Ready) + notifs.DeleteNotification(notifications.StartingUp) + logger.Info("Server is ready to receive web requests.") 
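The new auto-reload branch above extends the existing SIGHUP/web-reload loop: on every `--config.auto-reload-interval` tick it recomputes a checksum of the configuration file via `config.GenerateChecksum` and only calls `reloadConfig` when that checksum changes, feeding the result into the notifications callback. A self-contained sketch of the polling idea, using a plain SHA-256 of the file bytes as a stand-in for Prometheus' checksum helper and a hypothetical file path:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"log/slog"
	"os"
	"time"
)

// fileChecksum is a stand-in for config.GenerateChecksum: it hashes the raw
// file bytes so a change can be detected without parsing the configuration.
func fileChecksum(path string) (string, error) {
	b, err := os.ReadFile(path)
	if err != nil {
		return "", err
	}
	sum := sha256.Sum256(b)
	return hex.EncodeToString(sum[:]), nil
}

// watchConfig polls the file and calls reload only when its checksum changes.
func watchConfig(path string, interval time.Duration, reload func() error, stop <-chan struct{}) {
	logger := slog.Default()
	checksum, _ := fileChecksum(path)

	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			current, err := fileChecksum(path)
			if err != nil || current == checksum {
				continue
			}
			logger.Info("Configuration file change detected, reloading the configuration.")
			if err := reload(); err != nil {
				logger.Error("Error reloading config", "err", err)
				continue
			}
			checksum = current
		case <-stop:
			return
		}
	}
}

func main() {
	stop := make(chan struct{})
	// "prometheus.yml" is a hypothetical path used only for illustration.
	go watchConfig("prometheus.yml", time.Second, func() error {
		slog.Info("reload would run here")
		return nil
	}, stop)

	time.Sleep(5 * time.Second)
	close(stop)
}
```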
<-cancel return nil }, @@ -1194,7 +1236,7 @@ func main() { cancel := make(chan struct{}) g.Add( func() error { - level.Info(logger).Log("msg", "Starting TSDB ...") + logger.Info("Starting TSDB ...") if cfg.tsdb.WALSegmentSize != 0 { if cfg.tsdb.WALSegmentSize < 10*1024*1024 || cfg.tsdb.WALSegmentSize > 256*1024*1024 { return errors.New("flag 'storage.tsdb.wal-segment-size' must be set between 10MB and 256MB") @@ -1213,13 +1255,13 @@ func main() { switch fsType := prom_runtime.Statfs(localStoragePath); fsType { case "NFS_SUPER_MAGIC": - level.Warn(logger).Log("fs_type", fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.") + logger.Warn("This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.", "fs_type", fsType) default: - level.Info(logger).Log("fs_type", fsType) + logger.Info("filesystem information", "fs_type", fsType) } - level.Info(logger).Log("msg", "TSDB started") - level.Debug(logger).Log("msg", "TSDB options", + logger.Info("TSDB started") + logger.Debug("TSDB options", "MinBlockDuration", cfg.tsdb.MinBlockDuration, "MaxBlockDuration", cfg.tsdb.MaxBlockDuration, "MaxBytes", cfg.tsdb.MaxBytes, @@ -1238,7 +1280,7 @@ func main() { }, func(err error) { if err := fanoutStorage.Close(); err != nil { - level.Error(logger).Log("msg", "Error stopping storage", "err", err) + logger.Error("Error stopping storage", "err", err) } close(cancel) }, @@ -1250,7 +1292,7 @@ func main() { cancel := make(chan struct{}) g.Add( func() error { - level.Info(logger).Log("msg", "Starting WAL storage ...") + logger.Info("Starting WAL storage ...") if cfg.agent.WALSegmentSize != 0 { if cfg.agent.WALSegmentSize < 10*1024*1024 || cfg.agent.WALSegmentSize > 256*1024*1024 { return errors.New("flag 'storage.agent.wal-segment-size' must be set between 10MB and 256MB") @@ -1269,13 +1311,13 @@ func main() { switch fsType := prom_runtime.Statfs(localStoragePath); fsType { case "NFS_SUPER_MAGIC": - level.Warn(logger).Log("fs_type", fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.") + logger.Warn(fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. 
Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.") default: - level.Info(logger).Log("fs_type", fsType) + logger.Info(fsType) } - level.Info(logger).Log("msg", "Agent WAL storage started") - level.Debug(logger).Log("msg", "Agent WAL storage options", + logger.Info("Agent WAL storage started") + logger.Debug("Agent WAL storage options", "WALSegmentSize", cfg.agent.WALSegmentSize, "WALCompression", cfg.agent.WALCompression, "StripeSize", cfg.agent.StripeSize, @@ -1293,7 +1335,7 @@ func main() { }, func(e error) { if err := fanoutStorage.Close(); err != nil { - level.Error(logger).Log("msg", "Error stopping storage", "err", err) + logger.Error("Error stopping storage", "err", err) } close(cancel) }, @@ -1327,7 +1369,7 @@ func main() { <-reloadReady.C notifierManager.Run(discoveryManagerNotify.SyncCh()) - level.Info(logger).Log("msg", "Notifier manager stopped") + logger.Info("Notifier manager stopped") return nil }, func(err error) { @@ -1336,16 +1378,16 @@ func main() { ) } if err := g.Run(); err != nil { - level.Error(logger).Log("err", err) + logger.Error("Error running goroutines from run.Group", "err", err) os.Exit(1) } - level.Info(logger).Log("msg", "See you next time!") + logger.Info("See you next time!") } -func openDBWithMetrics(dir string, logger log.Logger, reg prometheus.Registerer, opts *tsdb.Options, stats *tsdb.DBStats) (*tsdb.DB, error) { +func openDBWithMetrics(dir string, logger *slog.Logger, reg prometheus.Registerer, opts *tsdb.Options, stats *tsdb.DBStats) (*tsdb.DB, error) { db, err := tsdb.Open( dir, - log.With(logger, "component", "tsdb"), + logger.With("component", "tsdb"), reg, opts, stats, @@ -1398,21 +1440,23 @@ type reloader struct { reloader func(*config.Config) error } -func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...reloader) (err error) { +func reloadConfig(filename string, enableExemplarStorage bool, logger *slog.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, callback func(bool), rls ...reloader) (err error) { start := time.Now() - timings := []interface{}{} - level.Info(logger).Log("msg", "Loading configuration file", "filename", filename) + timingsLogger := logger + logger.Info("Loading configuration file", "filename", filename) defer func() { if err == nil { configSuccess.Set(1) configSuccessTime.SetToCurrentTime() + callback(true) } else { configSuccess.Set(0) + callback(false) } }() - conf, err := config.LoadFile(filename, agentMode, expandExternalLabels, logger) + conf, err := config.LoadFile(filename, agentMode, logger) if err != nil { return fmt.Errorf("couldn't load configuration (--config.file=%q): %w", filename, err) } @@ -1427,10 +1471,10 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b for _, rl := range rls { rstart := time.Now() if err := rl.reloader(conf); err != nil { - level.Error(logger).Log("msg", "Failed to apply configuration", "err", err) + logger.Error("Failed to apply configuration", "err", err) failed = true } - timings = append(timings, rl.name, time.Since(rstart)) + timingsLogger = timingsLogger.With((rl.name), time.Since(rstart)) } if failed { return fmt.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename) @@ -1438,7 +1482,7 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b oldGoGC := 
debug.SetGCPercent(conf.Runtime.GoGC) if oldGoGC != conf.Runtime.GoGC { - level.Info(logger).Log("msg", "updated GOGC", "old", oldGoGC, "new", conf.Runtime.GoGC) + logger.Info("updated GOGC", "old", oldGoGC, "new", conf.Runtime.GoGC) } // Write the new setting out to the ENV var for runtime API output. if conf.Runtime.GoGC >= 0 { @@ -1448,8 +1492,7 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b } noStepSuqueryInterval.Set(conf.GlobalConfig.EvaluationInterval) - l := []interface{}{"msg", "Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start)} - level.Info(logger).Log(append(l, timings...)...) + timingsLogger.Info("Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start)) return nil } @@ -1603,6 +1646,9 @@ func (s *readyStorage) Appender(ctx context.Context) storage.Appender { type notReadyAppender struct{} +// SetOptions does nothing in this appender implementation. +func (n notReadyAppender) SetOptions(opts *storage.AppendOptions) {} + func (n notReadyAppender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { return 0, tsdb.ErrNotReady } @@ -1615,6 +1661,10 @@ func (n notReadyAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels return 0, tsdb.ErrNotReady } +func (n notReadyAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + return 0, tsdb.ErrNotReady +} + func (n notReadyAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { return 0, tsdb.ErrNotReady } @@ -1753,7 +1803,9 @@ type tsdbOptions struct { EnableMemorySnapshotOnShutdown bool EnableNativeHistograms bool EnableDelayedCompaction bool + CompactionDelayMaxPercent int EnableOverlappingCompaction bool + EnableOOONativeHistograms bool } func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { @@ -1773,8 +1825,10 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { MaxExemplars: opts.MaxExemplars, EnableMemorySnapshotOnShutdown: opts.EnableMemorySnapshotOnShutdown, EnableNativeHistograms: opts.EnableNativeHistograms, + EnableOOONativeHistograms: opts.EnableOOONativeHistograms, OutOfOrderTimeWindow: opts.OutOfOrderTimeWindow, EnableDelayedCompaction: opts.EnableDelayedCompaction, + CompactionDelayMaxPercent: opts.CompactionDelayMaxPercent, EnableOverlappingCompaction: opts.EnableOverlappingCompaction, } } @@ -1808,15 +1862,6 @@ func (opts agentOptions) ToAgentOptions(outOfOrderTimeWindow int64) agent.Option } } -// discoveryManager interfaces the discovery manager. This is used to keep using -// the manager that restarts SD's on reload for a few releases until we feel -// the new manager can be enabled for all users. -type discoveryManager interface { - ApplyConfig(cfg map[string]discovery.Configs) error - Run() error - SyncCh() <-chan map[string][]*targetgroup.Group -} - // rwProtoMsgFlagParser is a custom parser for config.RemoteWriteProtoMsg enum. 
type rwProtoMsgFlagParser struct { msgs *[]config.RemoteWriteProtoMsg diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go index c827812e607..4bd1c71b2de 100644 --- a/cmd/prometheus/main_test.go +++ b/cmd/prometheus/main_test.go @@ -31,9 +31,9 @@ import ( "time" "github.com/alecthomas/kingpin/v2" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/config" @@ -42,6 +42,11 @@ import ( "github.com/prometheus/prometheus/rules" ) +func init() { + // This can be removed when the default validation scheme in common is updated. + model.NameValidationScheme = model.UTF8Validation +} + const startupTime = 10 * time.Second var ( @@ -120,6 +125,7 @@ func TestFailedStartupExitCode(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } + t.Parallel() fakeInputFile := "fake-input-file" expectedExitStatus := 2 @@ -206,83 +212,125 @@ func TestWALSegmentSizeBounds(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } + t.Parallel() - for size, expectedExitStatus := range map[string]int{"9MB": 1, "257MB": 1, "10": 2, "1GB": 1, "12MB": 0} { - prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data")) - - // Log stderr in case of failure. - stderr, err := prom.StderrPipe() - require.NoError(t, err) - go func() { - slurp, _ := io.ReadAll(stderr) - t.Log(string(slurp)) - }() - - err = prom.Start() - require.NoError(t, err) - - if expectedExitStatus == 0 { - done := make(chan error, 1) - go func() { done <- prom.Wait() }() - select { - case err := <-done: - require.Fail(t, "prometheus should be still running: %v", err) - case <-time.After(startupTime): - prom.Process.Kill() - <-done + for _, tc := range []struct { + size string + exitCode int + }{ + { + size: "9MB", + exitCode: 1, + }, + { + size: "257MB", + exitCode: 1, + }, + { + size: "10", + exitCode: 2, + }, + { + size: "1GB", + exitCode: 1, + }, + { + size: "12MB", + exitCode: 0, + }, + } { + t.Run(tc.size, func(t *testing.T) { + t.Parallel() + prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+tc.size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data")) + + // Log stderr in case of failure. 
+ stderr, err := prom.StderrPipe() + require.NoError(t, err) + go func() { + slurp, _ := io.ReadAll(stderr) + t.Log(string(slurp)) + }() + + err = prom.Start() + require.NoError(t, err) + + if tc.exitCode == 0 { + done := make(chan error, 1) + go func() { done <- prom.Wait() }() + select { + case err := <-done: + require.Fail(t, "prometheus should be still running: %v", err) + case <-time.After(startupTime): + prom.Process.Kill() + <-done + } + return } - continue - } - err = prom.Wait() - require.Error(t, err) - var exitError *exec.ExitError - require.ErrorAs(t, err, &exitError) - status := exitError.Sys().(syscall.WaitStatus) - require.Equal(t, expectedExitStatus, status.ExitStatus()) + err = prom.Wait() + require.Error(t, err) + var exitError *exec.ExitError + require.ErrorAs(t, err, &exitError) + status := exitError.Sys().(syscall.WaitStatus) + require.Equal(t, tc.exitCode, status.ExitStatus()) + }) } } func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) { - t.Parallel() - if testing.Short() { t.Skip("skipping test in short mode.") } + t.Parallel() + + for _, tc := range []struct { + size string + exitCode int + }{ + { + size: "512KB", + exitCode: 1, + }, + { + size: "1MB", + exitCode: 0, + }, + } { + t.Run(tc.size, func(t *testing.T) { + t.Parallel() + prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+tc.size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data")) - for size, expectedExitStatus := range map[string]int{"512KB": 1, "1MB": 0} { - prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data")) - - // Log stderr in case of failure. - stderr, err := prom.StderrPipe() - require.NoError(t, err) - go func() { - slurp, _ := io.ReadAll(stderr) - t.Log(string(slurp)) - }() - - err = prom.Start() - require.NoError(t, err) - - if expectedExitStatus == 0 { - done := make(chan error, 1) - go func() { done <- prom.Wait() }() - select { - case err := <-done: - require.Fail(t, "prometheus should be still running: %v", err) - case <-time.After(startupTime): - prom.Process.Kill() - <-done + // Log stderr in case of failure. 
+ stderr, err := prom.StderrPipe() + require.NoError(t, err) + go func() { + slurp, _ := io.ReadAll(stderr) + t.Log(string(slurp)) + }() + + err = prom.Start() + require.NoError(t, err) + + if tc.exitCode == 0 { + done := make(chan error, 1) + go func() { done <- prom.Wait() }() + select { + case err := <-done: + require.Fail(t, "prometheus should be still running: %v", err) + case <-time.After(startupTime): + prom.Process.Kill() + <-done + } + return } - continue - } - err = prom.Wait() - require.Error(t, err) - var exitError *exec.ExitError - require.ErrorAs(t, err, &exitError) - status := exitError.Sys().(syscall.WaitStatus) - require.Equal(t, expectedExitStatus, status.ExitStatus()) + err = prom.Wait() + require.Error(t, err) + var exitError *exec.ExitError + require.ErrorAs(t, err, &exitError) + status := exitError.Sys().(syscall.WaitStatus) + require.Equal(t, tc.exitCode, status.ExitStatus()) + }) } } @@ -290,7 +338,7 @@ func TestTimeMetrics(t *testing.T) { tmpDir := t.TempDir() reg := prometheus.NewRegistry() - db, err := openDBWithMetrics(tmpDir, log.NewNopLogger(), reg, nil, nil) + db, err := openDBWithMetrics(tmpDir, promslog.NewNopLogger(), reg, nil, nil) require.NoError(t, err) defer func() { require.NoError(t, db.Close()) @@ -348,7 +396,9 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames } func TestAgentSuccessfulStartup(t *testing.T) { - prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig) + t.Parallel() + + prom := exec.Command(promPath, "-test.main", "--agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig) require.NoError(t, prom.Start()) actualExitStatus := 0 @@ -366,7 +416,9 @@ func TestAgentSuccessfulStartup(t *testing.T) { } func TestAgentFailedStartupWithServerFlag(t *testing.T) { - prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig) + t.Parallel() + + prom := exec.Command(promPath, "-test.main", "--agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig) output := bytes.Buffer{} prom.Stderr = &output @@ -393,7 +445,9 @@ func TestAgentFailedStartupWithServerFlag(t *testing.T) { } func TestAgentFailedStartupWithInvalidConfig(t *testing.T) { - prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig) + t.Parallel() + + prom := exec.Command(promPath, "-test.main", "--agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig) require.NoError(t, prom.Start()) actualExitStatus := 0 @@ -414,6 +468,7 @@ func TestModeSpecificFlags(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } + t.Parallel() testcases := []struct { mode string @@ -428,10 +483,11 @@ func TestModeSpecificFlags(t *testing.T) { for _, tc := range testcases { t.Run(fmt.Sprintf("%s mode with option %s", tc.mode, tc.arg), func(t *testing.T) { + t.Parallel() args := []string{"-test.main", tc.arg, t.TempDir(), "--web.listen-address=0.0.0.0:0"} if tc.mode == "agent" { - args = append(args, "--enable-feature=agent", "--config.file="+agentConfig) + args = append(args, "--agent", "--config.file="+agentConfig) } else { args = append(args, "--config.file="+promConfig) } @@ -479,6 +535,8 @@ func TestDocumentation(t *testing.T) { if runtime.GOOS == "windows" { t.SkipNow() } + t.Parallel() + ctx, cancel := 
context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -503,6 +561,8 @@ func TestDocumentation(t *testing.T) { } func TestRwProtoMsgFlagParser(t *testing.T) { + t.Parallel() + defaultOpts := config.RemoteWriteProtoMsgs{ config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2, } diff --git a/cmd/prometheus/main_unix_test.go b/cmd/prometheus/main_unix_test.go index 2011fb123f3..94eec27e79d 100644 --- a/cmd/prometheus/main_unix_test.go +++ b/cmd/prometheus/main_unix_test.go @@ -34,6 +34,7 @@ func TestStartupInterrupt(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } + t.Parallel() port := fmt.Sprintf(":%d", testutil.RandomUnprivilegedPort(t)) diff --git a/cmd/prometheus/query_log_test.go b/cmd/prometheus/query_log_test.go index 62e317bf8b1..25abf5e9657 100644 --- a/cmd/prometheus/query_log_test.go +++ b/cmd/prometheus/query_log_test.go @@ -125,12 +125,61 @@ func (p *queryLogTest) query(t *testing.T) { require.NoError(t, err) require.Equal(t, 200, r.StatusCode) case ruleOrigin: - time.Sleep(2 * time.Second) + // Poll the /api/v1/rules endpoint until a new rule evaluation is detected. + var lastEvalTime time.Time + for { + r, err := http.Get(fmt.Sprintf("http://%s:%d/api/v1/rules", p.host, p.port)) + require.NoError(t, err) + + rulesBody, err := io.ReadAll(r.Body) + require.NoError(t, err) + defer r.Body.Close() + + // Parse the rules response to find the last evaluation time. + newEvalTime := parseLastEvaluation(rulesBody) + if newEvalTime.After(lastEvalTime) { + if !lastEvalTime.IsZero() { + break + } + lastEvalTime = newEvalTime + } + + time.Sleep(100 * time.Millisecond) + } default: panic("can't query this origin") } } +// parseLastEvaluation extracts the last evaluation timestamp from the /api/v1/rules response. +func parseLastEvaluation(rulesBody []byte) time.Time { + var ruleResponse struct { + Status string `json:"status"` + Data struct { + Groups []struct { + Rules []struct { + LastEvaluation string `json:"lastEvaluation"` + } `json:"rules"` + } `json:"groups"` + } `json:"data"` + } + + err := json.Unmarshal(rulesBody, &ruleResponse) + if err != nil { + return time.Time{} + } + + for _, group := range ruleResponse.Data.Groups { + for _, rule := range group.Rules { + if evalTime, err := time.Parse(time.RFC3339Nano, rule.LastEvaluation); err == nil { + return evalTime + } + } + } + + return time.Time{} +} + // queryString returns the expected queryString of a this test. 
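`parseLastEvaluation` above deliberately decodes only the `lastEvaluation` field out of the `/api/v1/rules` response, so the query-log test can poll until a rule group has actually been evaluated instead of sleeping for a fixed two seconds. The snippet below shows the same anonymous-struct decoding against a trimmed, hypothetical payload (real responses carry many more fields per rule):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	// Trimmed, hypothetical /api/v1/rules payload; only lastEvaluation matters here.
	body := []byte(`{"status":"success","data":{"groups":[{"rules":[{"lastEvaluation":"2024-01-01T00:00:10.5Z"}]}]}}`)

	var resp struct {
		Data struct {
			Groups []struct {
				Rules []struct {
					LastEvaluation string `json:"lastEvaluation"`
				} `json:"rules"`
			} `json:"groups"`
		} `json:"data"`
	}
	if err := json.Unmarshal(body, &resp); err != nil {
		panic(err)
	}

	ts, err := time.Parse(time.RFC3339Nano, resp.Data.Groups[0].Rules[0].LastEvaluation)
	fmt.Println(ts, err) // 2024-01-01 00:00:10.5 +0000 UTC <nil>
}
```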
func (p *queryLogTest) queryString() string { switch p.origin { @@ -322,7 +371,7 @@ func (p *queryLogTest) run(t *testing.T) { if p.exactQueryCount() { require.Len(t, ql, qc) } else { - require.Greater(t, len(ql), qc, "no queries logged") + require.GreaterOrEqual(t, len(ql), qc, "no queries logged") } p.validateLastQuery(t, ql) qc = len(ql) @@ -353,7 +402,7 @@ func (p *queryLogTest) run(t *testing.T) { if p.exactQueryCount() { require.Len(t, ql, qc) } else { - require.Greater(t, len(ql), qc, "no queries logged") + require.GreaterOrEqual(t, len(ql), qc, "no queries logged") } p.validateLastQuery(t, ql) @@ -393,6 +442,7 @@ func readQueryLog(t *testing.T, path string) []queryLogLine { file, err := os.Open(path) require.NoError(t, err) defer file.Close() + scanner := bufio.NewScanner(file) for scanner.Scan() { var q queryLogLine @@ -406,6 +456,7 @@ func TestQueryLog(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } + t.Parallel() cwd, err := os.Getwd() require.NoError(t, err) @@ -424,6 +475,7 @@ func TestQueryLog(t *testing.T) { } t.Run(p.String(), func(t *testing.T) { + t.Parallel() p.run(t) }) } diff --git a/cmd/prometheus/reload_test.go b/cmd/prometheus/reload_test.go new file mode 100644 index 00000000000..18a7ff2ad18 --- /dev/null +++ b/cmd/prometheus/reload_test.go @@ -0,0 +1,229 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "bufio" + "encoding/json" + "io" + "net/http" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/util/testutil" +) + +const configReloadMetric = "prometheus_config_last_reload_successful" + +func TestAutoReloadConfig_ValidToValid(t *testing.T) { + steps := []struct { + configText string + expectedInterval string + expectedMetric float64 + }{ + { + configText: ` +global: + scrape_interval: 30s +`, + expectedInterval: "30s", + expectedMetric: 1, + }, + { + configText: ` +global: + scrape_interval: 15s +`, + expectedInterval: "15s", + expectedMetric: 1, + }, + { + configText: ` +global: + scrape_interval: 30s +`, + expectedInterval: "30s", + expectedMetric: 1, + }, + } + + runTestSteps(t, steps) +} + +func TestAutoReloadConfig_ValidToInvalidToValid(t *testing.T) { + steps := []struct { + configText string + expectedInterval string + expectedMetric float64 + }{ + { + configText: ` +global: + scrape_interval: 30s +`, + expectedInterval: "30s", + expectedMetric: 1, + }, + { + configText: ` +global: + scrape_interval: 15s +invalid_syntax +`, + expectedInterval: "30s", + expectedMetric: 0, + }, + { + configText: ` +global: + scrape_interval: 30s +`, + expectedInterval: "30s", + expectedMetric: 1, + }, + } + + runTestSteps(t, steps) +} + +func runTestSteps(t *testing.T, steps []struct { + configText string + expectedInterval string + expectedMetric float64 +}, +) { + configDir := t.TempDir() + configFilePath := filepath.Join(configDir, "prometheus.yml") + + t.Logf("Config file path: %s", configFilePath) + + require.NoError(t, os.WriteFile(configFilePath, []byte(steps[0].configText), 0o644), "Failed to write initial config file") + + port := testutil.RandomUnprivilegedPort(t) + runPrometheusWithLogging(t, configFilePath, port) + + baseURL := "http://localhost:" + strconv.Itoa(port) + require.Eventually(t, func() bool { + resp, err := http.Get(baseURL + "/-/ready") + if err != nil { + return false + } + defer resp.Body.Close() + return resp.StatusCode == http.StatusOK + }, 5*time.Second, 100*time.Millisecond, "Prometheus didn't become ready in time") + + for i, step := range steps { + t.Logf("Step %d", i) + require.NoError(t, os.WriteFile(configFilePath, []byte(step.configText), 0o644), "Failed to write config file for step") + + require.Eventually(t, func() bool { + return verifyScrapeInterval(t, baseURL, step.expectedInterval) && + verifyConfigReloadMetric(t, baseURL, step.expectedMetric) + }, 10*time.Second, 500*time.Millisecond, "Prometheus config reload didn't happen in time") + } +} + +func verifyScrapeInterval(t *testing.T, baseURL, expectedInterval string) bool { + resp, err := http.Get(baseURL + "/api/v1/status/config") + require.NoError(t, err) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + + config := struct { + Data struct { + YAML string `json:"yaml"` + } `json:"data"` + }{} + + require.NoError(t, json.Unmarshal(body, &config)) + return strings.Contains(config.Data.YAML, "scrape_interval: "+expectedInterval) +} + +func verifyConfigReloadMetric(t *testing.T, baseURL string, expectedValue float64) bool { + resp, err := http.Get(baseURL + "/metrics") + require.NoError(t, err) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + + lines := string(body) + var actualValue float64 + found := false + + for _, line := range strings.Split(lines, "\n") { + if 
strings.HasPrefix(line, configReloadMetric) { + parts := strings.Fields(line) + if len(parts) >= 2 { + actualValue, err = strconv.ParseFloat(parts[1], 64) + require.NoError(t, err) + found = true + break + } + } + } + + return found && actualValue == expectedValue +} + +func captureLogsToTLog(t *testing.T, r io.Reader) { + scanner := bufio.NewScanner(r) + for scanner.Scan() { + t.Log(scanner.Text()) + } + if err := scanner.Err(); err != nil { + t.Logf("Error reading logs: %v", err) + } +} + +func runPrometheusWithLogging(t *testing.T, configFilePath string, port int) { + stdoutPipe, stdoutWriter := io.Pipe() + stderrPipe, stderrWriter := io.Pipe() + + var wg sync.WaitGroup + wg.Add(2) + + prom := exec.Command(promPath, "-test.main", "--enable-feature=auto-reload-config", "--config.file="+configFilePath, "--config.auto-reload-interval=1s", "--web.listen-address=0.0.0.0:"+strconv.Itoa(port)) + prom.Stdout = stdoutWriter + prom.Stderr = stderrWriter + + go func() { + defer wg.Done() + captureLogsToTLog(t, stdoutPipe) + }() + go func() { + defer wg.Done() + captureLogsToTLog(t, stderrPipe) + }() + + t.Cleanup(func() { + prom.Process.Kill() + prom.Wait() + stdoutWriter.Close() + stderrWriter.Close() + wg.Wait() + }) + + require.NoError(t, prom.Start()) +} diff --git a/cmd/promtool/backfill.go b/cmd/promtool/backfill.go index 16491f0416f..1408975df91 100644 --- a/cmd/promtool/backfill.go +++ b/cmd/promtool/backfill.go @@ -21,9 +21,10 @@ import ( "math" "time" - "github.com/go-kit/log" "github.com/oklog/ulid" + "github.com/prometheus/common/promslog" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/tsdb" @@ -120,7 +121,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn // also need to append samples throughout the whole block range. To allow that, we // pretend that the block is twice as large here, but only really add sample in the // original interval later. - w, err := tsdb.NewBlockWriter(log.NewNopLogger(), outputDir, 2*blockDuration) + w, err := tsdb.NewBlockWriter(promslog.NewNopLogger(), outputDir, 2*blockDuration) if err != nil { return fmt.Errorf("block writer: %w", err) } diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index d1b6c0fcd98..49676ee5c4f 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -32,13 +32,13 @@ import ( "time" "github.com/alecthomas/kingpin/v2" - "github.com/go-kit/log" "github.com/google/pprof/profile" "github.com/prometheus/client_golang/api" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil/promlint" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" "github.com/prometheus/exporter-toolkit/web" "gopkg.in/yaml.v2" @@ -58,10 +58,16 @@ import ( _ "github.com/prometheus/prometheus/plugins" // Register plugins. "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/promqltest" + "github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/util/documentcli" ) +func init() { + // This can be removed when the default validation scheme in common is updated. 
+ model.NameValidationScheme = model.UTF8Validation +} + const ( successExitCode = 0 failureExitCode = 1 @@ -211,6 +217,7 @@ func main() { "test-rule-file", "The unit test file.", ).Required().ExistingFiles() + testRulesDebug := testRulesCmd.Flag("debug", "Enable unit test debugging.").Default("false").Bool() testRulesDiff := testRulesCmd.Flag("diff", "[Experimental] Print colored differential output between expected & received output.").Default("false").Bool() defaultDBPath := "data/" @@ -286,7 +293,7 @@ func main() { promQLLabelsDeleteQuery := promQLLabelsDeleteCmd.Arg("query", "PromQL query.").Required().String() promQLLabelsDeleteName := promQLLabelsDeleteCmd.Arg("name", "Name of the label to delete.").Required().String() - featureList := app.Flag("enable-feature", "Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details.").Default("").Strings() + featureList := app.Flag("enable-feature", "Comma separated feature names to enable. Currently unused.").Default("").Strings() documentationCmd := app.Command("write-documentation", "Generate command line documentation. Internal use.").Hidden() @@ -316,26 +323,21 @@ func main() { } } - var noDefaultScrapePort bool for _, f := range *featureList { opts := strings.Split(f, ",") for _, o := range opts { switch o { - case "no-default-scrape-port": - noDefaultScrapePort = true case "": continue - case "promql-at-modifier", "promql-negative-offset": - fmt.Printf(" WARNING: Option for --enable-feature is a no-op after promotion to a stable feature: %q\n", o) default: - fmt.Printf(" WARNING: Unknown option for --enable-feature: %q\n", o) + fmt.Printf(" WARNING: --enable-feature is currently a no-op") } } } switch parsedCmd { case sdCheckCmd.FullCommand(): - os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, noDefaultScrapePort, prometheus.DefaultRegisterer)) + os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, prometheus.DefaultRegisterer)) case checkConfigCmd.FullCommand(): os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint, *checkConfigLintFatal), *configFiles...)) @@ -391,6 +393,7 @@ func main() { }, *testRulesRun, *testRulesDiff, + *testRulesDebug, *testRulesFiles...), ) @@ -575,7 +578,7 @@ func checkFileExists(fn string) error { func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]string, error) { fmt.Println("Checking", filename) - cfg, err := config.LoadFile(filename, agentMode, false, log.NewNopLogger()) + cfg, err := config.LoadFile(filename, agentMode, promslog.NewNopLogger()) if err != nil { return nil, err } @@ -895,30 +898,30 @@ func compare(a, b compareRuleType) int { func checkDuplicates(groups []rulefmt.RuleGroup) []compareRuleType { var duplicates []compareRuleType - var rules compareRuleTypes + var cRules compareRuleTypes for _, group := range groups { for _, rule := range group.Rules { - rules = append(rules, compareRuleType{ + cRules = append(cRules, compareRuleType{ metric: ruleMetric(rule), - label: labels.FromMap(rule.Labels), + label: rules.FromMaps(group.Labels, rule.Labels), }) } } - if len(rules) < 2 { + if len(cRules) < 2 { return duplicates } - sort.Sort(rules) + sort.Sort(cRules) - last := rules[0] - for i := 1; i < len(rules); i++ { - if compare(last, rules[i]) == 0 { + last := cRules[0] + for i := 1; i < len(cRules); i++ { + if compare(last, cRules[i]) == 0 { // Don't add a duplicated rule multiple times. 
if len(duplicates) == 0 || compare(last, duplicates[len(duplicates)-1]) != 0 { - duplicates = append(duplicates, rules[i]) + duplicates = append(duplicates, cRules[i]) } } - last = rules[i] + last = cRules[i] } return duplicates @@ -1182,7 +1185,7 @@ func importRules(url *url.URL, roundTripper http.RoundTripper, start, end, outpu return fmt.Errorf("new api client error: %w", err) } - ruleImporter := newRuleImporter(log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)), cfg, api) + ruleImporter := newRuleImporter(promslog.New(&promslog.Config{}), cfg, api) errs := ruleImporter.loadGroups(ctx, files) for _, err := range errs { if err != nil { @@ -1216,7 +1219,7 @@ func checkTargetGroupsForScrapeConfig(targetGroups []*targetgroup.Group, scfg *c lb := labels.NewBuilder(labels.EmptyLabels()) for _, tg := range targetGroups { var failures []error - targets, failures = scrape.TargetsFromGroup(tg, scfg, false, targets, lb) + targets, failures = scrape.TargetsFromGroup(tg, scfg, targets, lb) if len(failures) > 0 { first := failures[0] return first diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go index 2b086851f36..698e6641d1d 100644 --- a/cmd/promtool/main_test.go +++ b/cmd/promtool/main_test.go @@ -31,6 +31,7 @@ import ( "testing" "time" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/labels" @@ -38,6 +39,11 @@ import ( "github.com/prometheus/prometheus/promql/promqltest" ) +func init() { + // This can be removed when the default validation scheme in common is updated. + model.NameValidationScheme = model.UTF8Validation +} + var promtoolPath = os.Args[0] func TestMain(m *testing.M) { @@ -140,7 +146,7 @@ func TestCheckSDFile(t *testing.T) { t.Run(test.name, func(t *testing.T) { _, err := checkSDFile(test.file) if test.err != "" { - require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error()) + require.EqualErrorf(t, err, test.err, "Expected error %q, got %q", test.err, err.Error()) return } require.NoError(t, err) @@ -222,7 +228,7 @@ func TestCheckTargetConfig(t *testing.T) { t.Run(test.name, func(t *testing.T) { _, err := checkConfig(false, "testdata/"+test.file, false) if test.err != "" { - require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error()) + require.EqualErrorf(t, err, test.err, "Expected error %q, got %q", test.err, err.Error()) return } require.NoError(t, err) @@ -309,7 +315,7 @@ func TestCheckConfigSyntax(t *testing.T) { expectedErrMsg = test.errWindows } if expectedErrMsg != "" { - require.Equalf(t, expectedErrMsg, err.Error(), "Expected error %q, got %q", test.err, err.Error()) + require.EqualErrorf(t, err, expectedErrMsg, "Expected error %q, got %q", test.err, err.Error()) return } require.NoError(t, err) @@ -339,7 +345,7 @@ func TestAuthorizationConfig(t *testing.T) { t.Run(test.name, func(t *testing.T) { _, err := checkConfig(false, "testdata/"+test.file, false) if test.err != "" { - require.Contains(t, err.Error(), test.err, "Expected error to contain %q, got %q", test.err, err.Error()) + require.ErrorContains(t, err, test.err, "Expected error to contain %q, got %q", test.err, err.Error()) return } require.NoError(t, err) diff --git a/cmd/promtool/rules.go b/cmd/promtool/rules.go index 5a18644842b..adb214b8122 100644 --- a/cmd/promtool/rules.go +++ b/cmd/promtool/rules.go @@ -16,12 +16,12 @@ package main import ( "context" "fmt" + "log/slog" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" v1 
"github.com/prometheus/client_golang/api/prometheus/v1" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" @@ -38,7 +38,7 @@ type queryRangeAPI interface { } type ruleImporter struct { - logger log.Logger + logger *slog.Logger config ruleImporterConfig apiClient queryRangeAPI @@ -57,8 +57,8 @@ type ruleImporterConfig struct { // newRuleImporter creates a new rule importer that can be used to parse and evaluate recording rule files and create new series // written to disk in blocks. -func newRuleImporter(logger log.Logger, config ruleImporterConfig, apiClient queryRangeAPI) *ruleImporter { - level.Info(logger).Log("backfiller", "new rule importer", "start", config.start.Format(time.RFC822), "end", config.end.Format(time.RFC822)) +func newRuleImporter(logger *slog.Logger, config ruleImporterConfig, apiClient queryRangeAPI) *ruleImporter { + logger.Info("new rule importer", "component", "backfiller", "start", config.start.Format(time.RFC822), "end", config.end.Format(time.RFC822)) return &ruleImporter{ logger: logger, config: config, @@ -80,10 +80,10 @@ func (importer *ruleImporter) loadGroups(_ context.Context, filenames []string) // importAll evaluates all the recording rules and creates new time series and writes them to disk in blocks. func (importer *ruleImporter) importAll(ctx context.Context) (errs []error) { for name, group := range importer.groups { - level.Info(importer.logger).Log("backfiller", "processing group", "name", name) + importer.logger.Info("processing group", "component", "backfiller", "name", name) for i, r := range group.Rules() { - level.Info(importer.logger).Log("backfiller", "processing rule", "id", i, "name", r.Name()) + importer.logger.Info("processing rule", "component", "backfiller", "id", i, "name", r.Name()) if err := importer.importRule(ctx, r.Query().String(), r.Name(), r.Labels(), importer.config.start, importer.config.end, int64(importer.config.maxBlockDuration/time.Millisecond), group); err != nil { errs = append(errs, err) } @@ -124,7 +124,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName return fmt.Errorf("query range: %w", err) } if warnings != nil { - level.Warn(importer.logger).Log("msg", "Range query returned warnings.", "warnings", warnings) + importer.logger.Warn("Range query returned warnings.", "warnings", warnings) } // To prevent races with compaction, a block writer only allows appending samples @@ -133,7 +133,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName // also need to append samples throughout the whole block range. To allow that, we // pretend that the block is twice as large here, but only really add sample in the // original interval later. 
- w, err := tsdb.NewBlockWriter(log.NewNopLogger(), importer.config.outputDir, 2*blockDuration) + w, err := tsdb.NewBlockWriter(promslog.NewNopLogger(), importer.config.outputDir, 2*blockDuration) if err != nil { return fmt.Errorf("new block writer: %w", err) } diff --git a/cmd/promtool/rules_test.go b/cmd/promtool/rules_test.go index d55fb0c8963..94e28e570d1 100644 --- a/cmd/promtool/rules_test.go +++ b/cmd/promtool/rules_test.go @@ -21,9 +21,9 @@ import ( "testing" "time" - "github.com/go-kit/log" v1 "github.com/prometheus/client_golang/api/prometheus/v1" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/labels" @@ -161,7 +161,7 @@ func TestBackfillRuleIntegration(t *testing.T) { } func newTestRuleImporter(_ context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) { - logger := log.NewNopLogger() + logger := promslog.NewNopLogger() cfg := ruleImporterConfig{ outputDir: tmpDir, start: start.Add(-10 * time.Hour), diff --git a/cmd/promtool/sd.go b/cmd/promtool/sd.go index e65262d439f..5e005bca8b1 100644 --- a/cmd/promtool/sd.go +++ b/cmd/promtool/sd.go @@ -20,9 +20,9 @@ import ( "os" "time" - "github.com/go-kit/log" "github.com/google/go-cmp/cmp" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" @@ -38,10 +38,10 @@ type sdCheckResult struct { } // CheckSD performs service discovery for the given job name and reports the results. -func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefaultScrapePort bool, registerer prometheus.Registerer) int { - logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) +func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, registerer prometheus.Registerer) int { + logger := promslog.New(&promslog.Config{}) - cfg, err := config.LoadFile(sdConfigFiles, false, false, logger) + cfg, err := config.LoadFile(sdConfigFiles, false, logger) if err != nil { fmt.Fprintln(os.Stderr, "Cannot load config", err) return failureExitCode @@ -114,7 +114,7 @@ outerLoop: } results := []sdCheckResult{} for _, tgs := range sdCheckResults { - results = append(results, getSDCheckResult(tgs, scrapeConfig, noDefaultScrapePort)...) + results = append(results, getSDCheckResult(tgs, scrapeConfig)...) 
} res, err := json.MarshalIndent(results, "", " ") @@ -127,7 +127,7 @@ outerLoop: return successExitCode } -func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig, noDefaultScrapePort bool) []sdCheckResult { +func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig) []sdCheckResult { sdCheckResults := []sdCheckResult{} lb := labels.NewBuilder(labels.EmptyLabels()) for _, targetGroup := range targetGroups { @@ -144,7 +144,7 @@ func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.Sc } } - res, orig, err := scrape.PopulateLabels(lb, scrapeConfig, noDefaultScrapePort) + res, orig, err := scrape.PopulateLabels(lb, scrapeConfig) result := sdCheckResult{ DiscoveredLabels: orig, Labels: res, diff --git a/cmd/promtool/sd_test.go b/cmd/promtool/sd_test.go index cb65ee72aa6..44d8084651e 100644 --- a/cmd/promtool/sd_test.go +++ b/cmd/promtool/sd_test.go @@ -70,5 +70,5 @@ func TestSDCheckResult(t *testing.T) { }, } - testutil.RequireEqual(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig, true)) + testutil.RequireEqual(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig)) } diff --git a/cmd/promtool/testdata/config_with_service_discovery_files.yml b/cmd/promtool/testdata/config_with_service_discovery_files.yml index 13b6d7faffb..6a550a84034 100644 --- a/cmd/promtool/testdata/config_with_service_discovery_files.yml +++ b/cmd/promtool/testdata/config_with_service_discovery_files.yml @@ -6,7 +6,7 @@ scrape_configs: alerting: alertmanagers: - scheme: http - api_version: v1 + api_version: v2 file_sd_configs: - files: - nonexistent_file.yml diff --git a/cmd/promtool/testdata/unittest.yml b/cmd/promtool/testdata/unittest.yml index ff511729ba3..e2a8230902e 100644 --- a/cmd/promtool/testdata/unittest.yml +++ b/cmd/promtool/testdata/unittest.yml @@ -69,13 +69,13 @@ tests: eval_time: 2m exp_samples: - labels: "test_histogram_repeat" - histogram: "{{count:2 sum:3 buckets:[2]}}" + histogram: "{{count:2 sum:3 counter_reset_hint:not_reset buckets:[2]}}" - expr: test_histogram_increase eval_time: 2m exp_samples: - labels: "test_histogram_increase" - histogram: "{{count:4 sum:5.6 buckets:[4]}}" + histogram: "{{count:4 sum:5.6 counter_reset_hint:not_reset buckets:[4]}}" # Ensure a value is stale as soon as it is marked as such. - expr: test_stale @@ -89,11 +89,11 @@ tests: # Ensure lookback delta is respected, when a value is missing. - expr: timestamp(test_missing) - eval_time: 5m + eval_time: 4m59s exp_samples: - value: 0 - expr: timestamp(test_missing) - eval_time: 5m1s + eval_time: 5m exp_samples: [] # Minimal test case to check edge case of a single sample. 
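The shifted expectations in this hunk (4m59s instead of 5m) and the 61-to-60 changes in the hunks that follow look like fallout from PromQL range and lookback windows becoming left-open: a sample sitting exactly on the left boundary, or exactly lookback-delta old, is no longer included. A small sketch of the arithmetic, under the stated assumption about the input series:

    package main

    import "fmt"

    func main() {
        // Assumption: the test series has one sample per minute starting at
        // t=0s, which is what the old expectation of 61 samples over [1h]
        // implies. Evaluated at t=3600s, a left-open 1h window is (0s, 3600s],
        // so the sample exactly at 0s no longer counts.
        windowEnd := 3600
        windowStart := windowEnd - 3600
        count := 0
        for ts := 0; ts <= 3600; ts += 60 {
            if ts > windowStart && ts <= windowEnd { // left-open, right-closed
                count++
            }
        }
        fmt.Println(count) // 60 instead of 61
    }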
@@ -113,7 +113,7 @@ tests: - expr: count_over_time(fixed_data[1h]) eval_time: 1h exp_samples: - - value: 61 + - value: 60 - expr: timestamp(fixed_data) eval_time: 1h exp_samples: @@ -183,7 +183,7 @@ tests: - expr: job:test:count_over_time1m eval_time: 1m exp_samples: - - value: 61 + - value: 60 labels: 'job:test:count_over_time1m{job="test"}' - expr: timestamp(job:test:count_over_time1m) eval_time: 1m10s @@ -194,7 +194,7 @@ tests: - expr: job:test:count_over_time1m eval_time: 2m exp_samples: - - value: 61 + - value: 60 labels: 'job:test:count_over_time1m{job="test"}' - expr: timestamp(job:test:count_over_time1m) eval_time: 2m59s999ms diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index 971ea8ab000..727275aa6b7 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "path/filepath" "runtime" @@ -32,9 +33,10 @@ import ( "time" "github.com/alecthomas/units" - "github.com/go-kit/log" "go.uber.org/atomic" + "github.com/prometheus/common/promslog" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" @@ -60,7 +62,7 @@ type writeBenchmark struct { memprof *os.File blockprof *os.File mtxprof *os.File - logger log.Logger + logger *slog.Logger } func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) error { @@ -68,7 +70,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err outPath: outPath, samplesFile: samplesFile, numMetrics: numMetrics, - logger: log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)), + logger: promslog.New(&promslog.Config{}), } if b.outPath == "" { dir, err := os.MkdirTemp("", "tsdb_bench") @@ -87,9 +89,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err dir := filepath.Join(b.outPath, "storage") - l := log.With(b.logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) - - st, err := tsdb.Open(dir, l, nil, &tsdb.Options{ + st, err := tsdb.Open(dir, b.logger, nil, &tsdb.Options{ RetentionDuration: int64(15 * 24 * time.Hour / time.Millisecond), MinBlockDuration: int64(2 * time.Hour / time.Millisecond), }, tsdb.NewDBStats()) @@ -367,25 +367,25 @@ func printBlocks(blocks []tsdb.BlockReader, writeHeader, humanReadable bool) { fmt.Fprintf(tw, "%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n", meta.ULID, - getFormatedTime(meta.MinTime, humanReadable), - getFormatedTime(meta.MaxTime, humanReadable), + getFormattedTime(meta.MinTime, humanReadable), + getFormattedTime(meta.MaxTime, humanReadable), time.Duration(meta.MaxTime-meta.MinTime)*time.Millisecond, meta.Stats.NumSamples, meta.Stats.NumChunks, meta.Stats.NumSeries, - getFormatedBytes(b.Size(), humanReadable), + getFormattedBytes(b.Size(), humanReadable), ) } } -func getFormatedTime(timestamp int64, humanReadable bool) string { +func getFormattedTime(timestamp int64, humanReadable bool) string { if humanReadable { return time.Unix(timestamp/1000, 0).UTC().String() } return strconv.FormatInt(timestamp, 10) } -func getFormatedBytes(bytes int64, humanReadable bool) string { +func getFormattedBytes(bytes int64, humanReadable bool) string { if humanReadable { return units.Base2Bytes(bytes).String() } diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index 7030635d1c0..78dacdc5693 100644 --- a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -26,13 +26,13 @@ import ( "strings" "time" - "github.com/go-kit/log" "github.com/google/go-cmp/cmp" "github.com/grafana/regexp" 
"github.com/nsf/jsondiff" "gopkg.in/yaml.v2" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -46,11 +46,11 @@ import ( // RulesUnitTest does unit testing of rules based on the unit testing files provided. // More info about the file format can be found in the docs. -func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int { - return RulesUnitTestResult(io.Discard, queryOpts, runStrings, diffFlag, files...) +func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag, debug bool, files ...string) int { + return RulesUnitTestResult(io.Discard, queryOpts, runStrings, diffFlag, debug, files...) } -func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int { +func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag, debug bool, files ...string) int { failed := false junit := &junitxml.JUnitXML{} @@ -60,7 +60,7 @@ func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, } for _, f := range files { - if errs := ruleUnitTest(f, queryOpts, run, diffFlag, junit.Suite(f)); errs != nil { + if errs := ruleUnitTest(f, queryOpts, run, diffFlag, debug, junit.Suite(f)); errs != nil { fmt.Fprintln(os.Stderr, " FAILED:") for _, e := range errs { fmt.Fprintln(os.Stderr, e.Error()) @@ -82,7 +82,7 @@ func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, return successExitCode } -func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool, ts *junitxml.TestSuite) []error { +func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag, debug bool, ts *junitxml.TestSuite) []error { b, err := os.ReadFile(filename) if err != nil { ts.Abort(err) @@ -131,7 +131,7 @@ func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *reg if t.Interval == 0 { t.Interval = unitTestInp.EvaluationInterval } - ers := t.test(evalInterval, groupOrderMap, queryOpts, diffFlag, unitTestInp.RuleFiles...) + ers := t.test(testname, evalInterval, groupOrderMap, queryOpts, diffFlag, debug, unitTestInp.RuleFiles...) if ers != nil { for _, e := range ers { tc.Fail(e.Error()) @@ -198,7 +198,14 @@ type testGroup struct { } // test performs the unit tests. -func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag bool, ruleFiles ...string) (outErr []error) { +func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag, debug bool, ruleFiles ...string) (outErr []error) { + if debug { + testStart := time.Now() + fmt.Printf("DEBUG: Starting test %s\n", testname) + defer func() { + fmt.Printf("DEBUG: Test %s finished, took %v\n", testname, time.Since(testStart)) + }() + } // Setup testing suite. 
suite, err := promqltest.NewLazyLoader(tg.seriesLoadingString(), queryOpts) if err != nil { @@ -218,7 +225,7 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i Appendable: suite.Storage(), Context: context.Background(), NotifyFunc: func(ctx context.Context, expr string, alerts ...*rules.Alert) {}, - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), } m := rules.NewManager(opts) groupsMap, ers := m.LoadGroups(time.Duration(tg.Interval), tg.ExternalLabels, tg.ExternalURL, nil, ruleFiles...) @@ -482,6 +489,32 @@ Outer: } } + if debug { + ts := tg.maxEvalTime() + // Potentially a test can be specified at a time with fractional seconds, + // which PromQL cannot represent, so round up to the next whole second. + ts = (ts + time.Second).Truncate(time.Second) + expr := fmt.Sprintf(`{__name__=~".+"}[%v]`, ts) + q, err := suite.QueryEngine().NewInstantQuery(context.Background(), suite.Queryable(), nil, expr, mint.Add(ts)) + if err != nil { + fmt.Printf("DEBUG: Failed querying, expr: %q, err: %v\n", expr, err) + return errs + } + res := q.Exec(suite.Context()) + if res.Err != nil { + fmt.Printf("DEBUG: Failed query exec, expr: %q, err: %v\n", expr, res.Err) + return errs + } + switch v := res.Value.(type) { + case promql.Matrix: + fmt.Printf("DEBUG: Dump of all data (input_series and rules) at %v:\n", ts) + fmt.Println(v.String()) + default: + fmt.Printf("DEBUG: Got unexpected type %T\n", v) + return errs + } + } + if len(errs) > 0 { return errs } diff --git a/cmd/promtool/unittest_test.go b/cmd/promtool/unittest_test.go index 9bbac28e9fb..9b73dcdc1ca 100644 --- a/cmd/promtool/unittest_test.go +++ b/cmd/promtool/unittest_test.go @@ -141,14 +141,14 @@ func TestRulesUnitTest(t *testing.T) { reuseCount[tt.want] += len(tt.args.files) } t.Run(tt.name, func(t *testing.T) { - if got := RulesUnitTest(tt.queryOpts, nil, false, tt.args.files...); got != tt.want { + if got := RulesUnitTest(tt.queryOpts, nil, false, false, tt.args.files...); got != tt.want { t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want) } }) } t.Run("Junit xml output ", func(t *testing.T) { var buf bytes.Buffer - if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, reuseFiles...); got != 1 { + if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, false, reuseFiles...); got != 1 { t.Errorf("RulesUnitTestResults() = %v, want 1", got) } var test junitxml.JUnitXML @@ -230,7 +230,7 @@ func TestRulesUnitTestRun(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := RulesUnitTest(tt.queryOpts, tt.args.run, false, tt.args.files...) + got := RulesUnitTest(tt.queryOpts, tt.args.run, false, false, tt.args.files...) require.Equal(t, tt.want, got) }) } diff --git a/config/config.go b/config/config.go index 4f80b551bc6..bc73f98d59b 100644 --- a/config/config.go +++ b/config/config.go @@ -16,6 +16,8 @@ package config import ( "errors" "fmt" + "log/slog" + "mime" "net/url" "os" "path/filepath" @@ -25,8 +27,6 @@ import ( "time" "github.com/alecthomas/units" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/grafana/regexp" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -73,7 +73,7 @@ const ( ) // Load parses the YAML input s into a Config. 
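Throughout the promtool files above, the patch swaps go-kit/log for log/slog via github.com/prometheus/common/promslog, and the config.Load/LoadFile signatures below change accordingly. A minimal standalone sketch of the calling convention, not taken from the patch itself:

    package main

    import (
        "time"

        "github.com/prometheus/common/promslog"
    )

    func main() {
        // Replaces log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr));
        // promslog.New returns a *slog.Logger.
        logger := promslog.New(&promslog.Config{})

        // Old: level.Info(logger).Log("backfiller", "new rule importer", "start", s)
        // New: the event becomes the message, key/value pairs follow.
        logger.Info("new rule importer", "component", "backfiller", "start", time.Now().Format(time.RFC822))

        // Where go-kit's log.NewNopLogger() was used, promslog.NewNopLogger() now is.
        nop := promslog.NewNopLogger()
        nop.Debug("discarded")
    }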
-func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, error) { +func Load(s string, logger *slog.Logger) (*Config, error) { cfg := &Config{} // If the entire config body is empty the UnmarshalYAML method is // never called. We thus have to set the DefaultConfig at the entry @@ -85,10 +85,6 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro return nil, err } - if !expandExternalLabels { - return cfg, nil - } - b := labels.NewScratchBuilder(0) cfg.GlobalConfig.ExternalLabels.Range(func(v labels.Label) { newV := os.Expand(v.Value, func(s string) string { @@ -98,26 +94,40 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro if v := os.Getenv(s); v != "" { return v } - level.Warn(logger).Log("msg", "Empty environment variable", "name", s) + logger.Warn("Empty environment variable", "name", s) return "" }) if newV != v.Value { - level.Debug(logger).Log("msg", "External label replaced", "label", v.Name, "input", v.Value, "output", newV) + logger.Debug("External label replaced", "label", v.Name, "input", v.Value, "output", newV) } // Note newV can be blank. https://github.com/prometheus/prometheus/issues/11024 b.Add(v.Name, newV) }) - cfg.GlobalConfig.ExternalLabels = b.Labels() + if !b.Labels().IsEmpty() { + cfg.GlobalConfig.ExternalLabels = b.Labels() + } + + switch cfg.OTLPConfig.TranslationStrategy { + case UnderscoreEscapingWithSuffixes: + case "": + case NoUTF8EscapingWithSuffixes: + if cfg.GlobalConfig.MetricNameValidationScheme == LegacyValidationConfig { + return nil, errors.New("OTLP translation strategy NoUTF8EscapingWithSuffixes is not allowed when UTF8 is disabled") + } + default: + return nil, fmt.Errorf("unsupported OTLP translation strategy %q", cfg.OTLPConfig.TranslationStrategy) + } + return cfg, nil } // LoadFile parses the given YAML file into a Config. -func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log.Logger) (*Config, error) { +func LoadFile(filename string, agentMode bool, logger *slog.Logger) (*Config, error) { content, err := os.ReadFile(filename) if err != nil { return nil, err } - cfg, err := Load(string(content), expandExternalLabels, logger) + cfg, err := Load(string(content), logger) if err != nil { return nil, fmt.Errorf("parsing YAML file %s: %w", filename, err) } @@ -166,13 +176,13 @@ var ( // DefaultScrapeConfig is the default scrape configuration. DefaultScrapeConfig = ScrapeConfig{ // ScrapeTimeout, ScrapeInterval and ScrapeProtocols default to the configured globals. - ScrapeClassicHistograms: false, - MetricsPath: "/metrics", - Scheme: "http", - HonorLabels: false, - HonorTimestamps: true, - HTTPClientConfig: config.DefaultHTTPClientConfig, - EnableCompression: true, + AlwaysScrapeClassicHistograms: false, + MetricsPath: "/metrics", + Scheme: "http", + HonorLabels: false, + HonorTimestamps: true, + HTTPClientConfig: config.DefaultHTTPClientConfig, + EnableCompression: true, } // DefaultAlertmanagerConfig is the default alertmanager configuration. @@ -183,13 +193,18 @@ var ( HTTPClientConfig: config.DefaultHTTPClientConfig, } + DefaultRemoteWriteHTTPClientConfig = config.HTTPClientConfig{ + FollowRedirects: true, + EnableHTTP2: false, + } + // DefaultRemoteWriteConfig is the default remote write configuration. 
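With the hunks above, Load and LoadFile drop the expandExternalLabels flag (external labels are now always expanded) and take a *slog.Logger, and the new otlp.translation_strategy option is validated during loading. A hedged usage sketch against the new signatures; the inline YAML and file path are illustrative only:

    package main

    import (
        "fmt"

        "github.com/prometheus/common/promslog"

        "github.com/prometheus/prometheus/config"
    )

    func main() {
        logger := promslog.NewNopLogger()

        // Load now takes only the config body and a logger.
        cfg, err := config.Load("otlp:\n  translation_strategy: NoUTF8EscapingWithSuffixes\n", logger)
        if err != nil {
            fmt.Println("load error:", err)
            return
        }
        fmt.Println(cfg.OTLPConfig.TranslationStrategy) // NoUTF8EscapingWithSuffixes

        // LoadFile likewise lost its expandExternalLabels parameter
        // ("prometheus.yml" is a placeholder path):
        //   cfg, err = config.LoadFile("prometheus.yml", false /* agentMode */, logger)
    }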
DefaultRemoteWriteConfig = RemoteWriteConfig{ RemoteTimeout: model.Duration(30 * time.Second), ProtobufMessage: RemoteWriteProtoMsgV1, QueueConfig: DefaultQueueConfig, MetadataConfig: DefaultMetadataConfig, - HTTPClientConfig: config.DefaultHTTPClientConfig, + HTTPClientConfig: DefaultRemoteWriteHTTPClientConfig, } // DefaultQueueConfig is the default remote queue configuration. @@ -236,7 +251,9 @@ var ( } // DefaultOTLPConfig is the default OTLP configuration. - DefaultOTLPConfig = OTLPConfig{} + DefaultOTLPConfig = OTLPConfig{ + TranslationStrategy: UnderscoreEscapingWithSuffixes, + } ) // Config is the top-level configuration for Prometheus's config files. @@ -476,9 +493,22 @@ func (s ScrapeProtocol) Validate() error { return nil } +// HeaderMediaType returns the MIME mediaType for a particular ScrapeProtocol. +func (s ScrapeProtocol) HeaderMediaType() string { + if _, ok := ScrapeProtocolsHeaders[s]; !ok { + return "" + } + mediaType, _, err := mime.ParseMediaType(ScrapeProtocolsHeaders[s]) + if err != nil { + return "" + } + return mediaType +} + var ( PrometheusProto ScrapeProtocol = "PrometheusProto" PrometheusText0_0_4 ScrapeProtocol = "PrometheusText0.0.4" + PrometheusText1_0_0 ScrapeProtocol = "PrometheusText1.0.0" OpenMetricsText0_0_1 ScrapeProtocol = "OpenMetricsText0.0.1" OpenMetricsText1_0_0 ScrapeProtocol = "OpenMetricsText1.0.0" UTF8NamesHeader string = model.EscapingKey + "=" + model.AllowUTF8 @@ -486,6 +516,7 @@ var ( ScrapeProtocolsHeaders = map[ScrapeProtocol]string{ PrometheusProto: "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", PrometheusText0_0_4: "text/plain;version=0.0.4", + PrometheusText1_0_0: "text/plain;version=1.0.0;escaping=allow-utf-8", OpenMetricsText0_0_1: "application/openmetrics-text;version=0.0.1", OpenMetricsText1_0_0: "application/openmetrics-text;version=1.0.0", } @@ -495,6 +526,7 @@ var ( DefaultScrapeProtocols = []ScrapeProtocol{ OpenMetricsText1_0_0, OpenMetricsText0_0_1, + PrometheusText1_0_0, PrometheusText0_0_4, } @@ -506,6 +538,7 @@ var ( PrometheusProto, OpenMetricsText1_0_0, OpenMetricsText0_0_1, + PrometheusText1_0_0, PrometheusText0_0_4, } ) @@ -632,10 +665,17 @@ type ScrapeConfig struct { // The protocols to negotiate during a scrape. It tells clients what // protocol are accepted by Prometheus and with what preference (most wanted is first). // Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1, - // OpenMetricsText1.0.0, PrometheusText0.0.4. + // OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4. ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"` - // Whether to scrape a classic histogram that is also exposed as a native histogram. - ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"` + // The fallback protocol to use if the Content-Type provided by the target + // is not provided, blank, or not one of the expected values. + // Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1, + // OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4. + ScrapeFallbackProtocol ScrapeProtocol `yaml:"fallback_scrape_protocol,omitempty"` + // Whether to scrape a classic histogram, even if it is also exposed as a native histogram. + AlwaysScrapeClassicHistograms bool `yaml:"always_scrape_classic_histograms,omitempty"` + // Whether to convert all scraped classic histograms into a native histogram with custom buckets. 
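The new ScrapeProtocol helper and the PrometheusText1.0.0 entry added above behave as in this sketch; the expected values match the TestScrapeProtocolHeader table later in this diff:

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/config"
    )

    func main() {
        // HeaderMediaType parses the Accept header value registered for a
        // scrape protocol and returns just the media type; unknown protocols
        // return an empty string.
        fmt.Println(config.PrometheusText1_0_0.HeaderMediaType())       // text/plain
        fmt.Println(config.PrometheusProto.HeaderMediaType())           // application/vnd.google.protobuf
        fmt.Println(config.OpenMetricsText1_0_0.HeaderMediaType())      // application/openmetrics-text
        fmt.Println(config.ScrapeProtocol("invalid").HeaderMediaType()) // ""
    }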
+ ConvertClassicHistogramsToNHCB bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"` // File to which scrape failures are logged. ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"` // The HTTP resource path on which to fetch metrics from targets. @@ -783,11 +823,17 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { return fmt.Errorf("%w for scrape config with job name %q", err, c.JobName) } + if c.ScrapeFallbackProtocol != "" { + if err := c.ScrapeFallbackProtocol.Validate(); err != nil { + return fmt.Errorf("invalid fallback_scrape_protocol for scrape config with job name %q: %w", c.JobName, err) + } + } + switch globalConfig.MetricNameValidationScheme { - case "", LegacyValidationConfig: - case UTF8ValidationConfig: + case LegacyValidationConfig: + case "", UTF8ValidationConfig: if model.NameValidationScheme != model.UTF8Validation { - return fmt.Errorf("utf8 name validation requested but feature not enabled via --enable-feature=utf8-names") + panic("utf8 name validation requested but model.NameValidationScheme is not set to UTF8") } default: return fmt.Errorf("unknown name validation method specified, must be either 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme) @@ -958,6 +1004,7 @@ func (a AlertmanagerConfigs) ToMap() map[string]*AlertmanagerConfig { // AlertmanagerAPIVersion represents a version of the // github.com/prometheus/alertmanager/api, e.g. 'v1' or 'v2'. +// 'v1' is no longer supported. type AlertmanagerAPIVersion string // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -987,7 +1034,7 @@ const ( ) var SupportedAlertmanagerAPIVersions = []AlertmanagerAPIVersion{ - AlertmanagerAPIVersionV1, AlertmanagerAPIVersionV2, + AlertmanagerAPIVersionV2, } // AlertmanagerConfig configures how Alertmanagers can be discovered and communicated with. @@ -1369,9 +1416,20 @@ func getGoGCEnv() int { return DefaultRuntimeConfig.GoGC } +type translationStrategyOption string + +var ( + // NoUTF8EscapingWithSuffixes will keep UTF-8 characters as they are, units and type suffixes will still be added. + NoUTF8EscapingWithSuffixes translationStrategyOption = "NoUTF8EscapingWithSuffixes" + // UnderscoreEscapingWithSuffixes is the default option for translating OTLP to Prometheus. + // This option will translate all UTF-8 characters to underscores, while adding units and type suffixes. + UnderscoreEscapingWithSuffixes translationStrategyOption = "UnderscoreEscapingWithSuffixes" +) + // OTLPConfig is the configuration for writing to the OTLP endpoint. type OTLPConfig struct { - PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"` + PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"` + TranslationStrategy translationStrategyOption `yaml:"translation_strategy,omitempty"` } // UnmarshalYAML implements the yaml.Unmarshaler interface. diff --git a/config/config_test.go b/config/config_test.go index ea1d3b11c99..77cbf9b2eb5 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -24,10 +24,10 @@ import ( "time" "github.com/alecthomas/units" - "github.com/go-kit/log" "github.com/grafana/regexp" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" @@ -62,6 +62,11 @@ import ( "github.com/prometheus/prometheus/util/testutil" ) +func init() { + // This can be removed when the default validation scheme in common is updated. 
+ model.NameValidationScheme = model.UTF8Validation +} + func mustParseURL(u string) *config.URL { parsed, err := url.Parse(u) if err != nil { @@ -137,7 +142,7 @@ var expectedConf = &Config{ }, }, FollowRedirects: true, - EnableHTTP2: true, + EnableHTTP2: false, }, }, { @@ -153,7 +158,7 @@ var expectedConf = &Config{ KeyFile: filepath.FromSlash("testdata/valid_key_file"), }, FollowRedirects: true, - EnableHTTP2: true, + EnableHTTP2: false, }, Headers: map[string]string{"name": "value"}, }, @@ -163,6 +168,7 @@ var expectedConf = &Config{ PromoteResourceAttributes: []string{ "k8s.cluster.name", "k8s.job.name", "k8s.namespace.name", }, + TranslationStrategy: UnderscoreEscapingWithSuffixes, }, RemoteReadConfigs: []*RemoteReadConfig{ @@ -201,19 +207,20 @@ var expectedConf = &Config{ { JobName: "prometheus", - HonorLabels: true, - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, - ScrapeFailureLogFile: "testdata/fail_prom.log", + HonorLabels: true, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFallbackProtocol: PrometheusText0_0_4, + ScrapeFailureLogFile: "testdata/fail_prom.log", MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1495,8 +1502,13 @@ var expectedConf = &Config{ }, } +func TestYAMLNotLongerSupportedAMApi(t *testing.T) { + _, err := LoadFile("testdata/config_with_no_longer_supported_am_api_config.yml", false, promslog.NewNopLogger()) + require.Error(t, err) +} + func TestYAMLRoundtrip(t *testing.T) { - want, err := LoadFile("testdata/roundtrip.good.yml", false, false, log.NewNopLogger()) + want, err := LoadFile("testdata/roundtrip.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) out, err := yaml.Marshal(want) @@ -1509,7 +1521,7 @@ func TestYAMLRoundtrip(t *testing.T) { } func TestRemoteWriteRetryOnRateLimit(t *testing.T) { - want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, false, log.NewNopLogger()) + want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) out, err := yaml.Marshal(want) @@ -1524,7 +1536,7 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) { func TestOTLPSanitizeResourceAttributes(t *testing.T) { t.Run("good config", func(t *testing.T) { - want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, false, log.NewNopLogger()) + want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, promslog.NewNopLogger()) require.NoError(t, err) out, err := yaml.Marshal(want) @@ -1536,25 +1548,87 @@ func TestOTLPSanitizeResourceAttributes(t *testing.T) { }) t.Run("bad config", func(t *testing.T) { - _, err := LoadFile(filepath.Join("testdata", 
"otlp_sanitize_resource_attributes.bad.yml"), false, false, log.NewNopLogger()) + _, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.bad.yml"), false, promslog.NewNopLogger()) require.ErrorContains(t, err, `duplicated promoted OTel resource attribute "k8s.job.name"`) require.ErrorContains(t, err, `empty promoted OTel resource attribute`) }) } +func TestOTLPAllowUTF8(t *testing.T) { + t.Run("good config", func(t *testing.T) { + fpath := filepath.Join("testdata", "otlp_allow_utf8.good.yml") + verify := func(t *testing.T, conf *Config, err error) { + t.Helper() + require.NoError(t, err) + require.Equal(t, NoUTF8EscapingWithSuffixes, conf.OTLPConfig.TranslationStrategy) + } + + t.Run("LoadFile", func(t *testing.T) { + conf, err := LoadFile(fpath, false, promslog.NewNopLogger()) + verify(t, conf, err) + }) + t.Run("Load", func(t *testing.T) { + content, err := os.ReadFile(fpath) + require.NoError(t, err) + conf, err := Load(string(content), promslog.NewNopLogger()) + verify(t, conf, err) + }) + }) + + t.Run("incompatible config", func(t *testing.T) { + fpath := filepath.Join("testdata", "otlp_allow_utf8.incompatible.yml") + verify := func(t *testing.T, err error) { + t.Helper() + require.ErrorContains(t, err, `OTLP translation strategy NoUTF8EscapingWithSuffixes is not allowed when UTF8 is disabled`) + } + + t.Run("LoadFile", func(t *testing.T) { + _, err := LoadFile(fpath, false, promslog.NewNopLogger()) + verify(t, err) + }) + t.Run("Load", func(t *testing.T) { + content, err := os.ReadFile(fpath) + require.NoError(t, err) + _, err = Load(string(content), promslog.NewNopLogger()) + t.Log("err", err) + verify(t, err) + }) + }) + + t.Run("bad config", func(t *testing.T) { + fpath := filepath.Join("testdata", "otlp_allow_utf8.bad.yml") + verify := func(t *testing.T, err error) { + t.Helper() + require.ErrorContains(t, err, `unsupported OTLP translation strategy "Invalid"`) + } + + t.Run("LoadFile", func(t *testing.T) { + _, err := LoadFile(fpath, false, promslog.NewNopLogger()) + verify(t, err) + }) + t.Run("Load", func(t *testing.T) { + content, err := os.ReadFile(fpath) + require.NoError(t, err) + _, err = Load(string(content), promslog.NewNopLogger()) + verify(t, err) + }) + }) +} + func TestLoadConfig(t *testing.T) { // Parse a valid file that sets a global scrape timeout. This tests whether parsing // an overwritten default field in the global config permanently changes the default. - _, err := LoadFile("testdata/global_timeout.good.yml", false, false, log.NewNopLogger()) + _, err := LoadFile("testdata/global_timeout.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) - c, err := LoadFile("testdata/conf.good.yml", false, false, log.NewNopLogger()) + c, err := LoadFile("testdata/conf.good.yml", false, promslog.NewNopLogger()) + require.NoError(t, err) require.Equal(t, expectedConf, c) } func TestScrapeIntervalLarger(t *testing.T) { - c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, false, log.NewNopLogger()) + c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) require.Len(t, c.ScrapeConfigs, 1) for _, sc := range c.ScrapeConfigs { @@ -1564,7 +1638,7 @@ func TestScrapeIntervalLarger(t *testing.T) { // YAML marshaling must not reveal authentication credentials. 
func TestElideSecrets(t *testing.T) { - c, err := LoadFile("testdata/conf.good.yml", false, false, log.NewNopLogger()) + c, err := LoadFile("testdata/conf.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) secretRe := regexp.MustCompile(`\\u003csecret\\u003e|`) @@ -1581,31 +1655,31 @@ func TestElideSecrets(t *testing.T) { func TestLoadConfigRuleFilesAbsolutePath(t *testing.T) { // Parse a valid file that sets a rule files with an absolute path - c, err := LoadFile(ruleFilesConfigFile, false, false, log.NewNopLogger()) + c, err := LoadFile(ruleFilesConfigFile, false, promslog.NewNopLogger()) require.NoError(t, err) require.Equal(t, ruleFilesExpectedConf, c) } func TestKubernetesEmptyAPIServer(t *testing.T) { - _, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, false, log.NewNopLogger()) + _, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) } func TestKubernetesWithKubeConfig(t *testing.T) { - _, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, false, log.NewNopLogger()) + _, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) } func TestKubernetesSelectors(t *testing.T) { - _, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, false, log.NewNopLogger()) + _, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) - _, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, false, log.NewNopLogger()) + _, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) - _, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, false, log.NewNopLogger()) + _, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) - _, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, false, log.NewNopLogger()) + _, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) - _, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, false, log.NewNopLogger()) + _, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) } @@ -2075,24 +2149,39 @@ var expectedErrors = []struct { }, { filename: "scrape_config_files_scrape_protocols.bad.yml", - errMsg: `parsing YAML file testdata/scrape_config_files_scrape_protocols.bad.yml: scrape_protocols: unknown scrape protocol prometheusproto, supported: [OpenMetricsText0.0.1 OpenMetricsText1.0.0 PrometheusProto PrometheusText0.0.4] for scrape config with job name "node"`, + errMsg: `parsing YAML file testdata/scrape_config_files_scrape_protocols.bad.yml: scrape_protocols: unknown scrape protocol prometheusproto, supported: [OpenMetricsText0.0.1 OpenMetricsText1.0.0 PrometheusProto PrometheusText0.0.4 PrometheusText1.0.0] for scrape config with job name "node"`, }, { filename: "scrape_config_files_scrape_protocols2.bad.yml", errMsg: `parsing YAML file testdata/scrape_config_files_scrape_protocols2.bad.yml: duplicated protocol in scrape_protocols, got [OpenMetricsText1.0.0 PrometheusProto OpenMetricsText1.0.0] for scrape config with job name "node"`, }, + { + filename: "scrape_config_files_fallback_scrape_protocol1.bad.yml", + errMsg: `parsing YAML file 
testdata/scrape_config_files_fallback_scrape_protocol1.bad.yml: invalid fallback_scrape_protocol for scrape config with job name "node": unknown scrape protocol prometheusproto, supported: [OpenMetricsText0.0.1 OpenMetricsText1.0.0 PrometheusProto PrometheusText0.0.4 PrometheusText1.0.0]`, + }, + { + filename: "scrape_config_files_fallback_scrape_protocol2.bad.yml", + errMsg: `unmarshal errors`, + }, } func TestBadConfigs(t *testing.T) { + model.NameValidationScheme = model.LegacyValidation + defer func() { + model.NameValidationScheme = model.UTF8Validation + }() for _, ee := range expectedErrors { - _, err := LoadFile("testdata/"+ee.filename, false, false, log.NewNopLogger()) - require.Error(t, err, "%s", ee.filename) - require.Contains(t, err.Error(), ee.errMsg, + _, err := LoadFile("testdata/"+ee.filename, false, promslog.NewNopLogger()) + require.ErrorContains(t, err, ee.errMsg, "Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err) } } func TestBadStaticConfigsJSON(t *testing.T) { + model.NameValidationScheme = model.LegacyValidation + defer func() { + model.NameValidationScheme = model.UTF8Validation + }() content, err := os.ReadFile("testdata/static_config.bad.json") require.NoError(t, err) var tg targetgroup.Group @@ -2101,6 +2190,10 @@ func TestBadStaticConfigsJSON(t *testing.T) { } func TestBadStaticConfigsYML(t *testing.T) { + model.NameValidationScheme = model.LegacyValidation + defer func() { + model.NameValidationScheme = model.UTF8Validation + }() content, err := os.ReadFile("testdata/static_config.bad.yml") require.NoError(t, err) var tg targetgroup.Group @@ -2109,7 +2202,7 @@ func TestBadStaticConfigsYML(t *testing.T) { } func TestEmptyConfig(t *testing.T) { - c, err := Load("", false, log.NewNopLogger()) + c, err := Load("", promslog.NewNopLogger()) require.NoError(t, err) exp := DefaultConfig require.Equal(t, exp, *c) @@ -2119,38 +2212,34 @@ func TestExpandExternalLabels(t *testing.T) { // Cleanup ant TEST env variable that could exist on the system. 
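Several tests above now pin the global metric-name validation scheme for their duration. The pattern, extracted as a standalone sketch (illustrative only; the IsValidMetricName call is just there to make the effect visible):

    package main

    import (
        "fmt"

        "github.com/prometheus/common/model"
    )

    func main() {
        // Force legacy validation for a block of work, then restore UTF-8,
        // mirroring what TestBadConfigs and the static-config tests above do.
        model.NameValidationScheme = model.LegacyValidation
        defer func() {
            model.NameValidationScheme = model.UTF8Validation
        }()

        fmt.Println(model.IsValidMetricName(model.LabelValue("my.metric"))) // false under legacy rules
    }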
os.Setenv("TEST", "") - c, err := LoadFile("testdata/external_labels.good.yml", false, false, log.NewNopLogger()) - require.NoError(t, err) - testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foo${TEST}bar", "foo", "${TEST}", "qux", "foo$${TEST}", "xyz", "foo$$bar"), c.GlobalConfig.ExternalLabels) - - c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger()) + c, err := LoadFile("testdata/external_labels.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foobar", "foo", "", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels) os.Setenv("TEST", "TestValue") - c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger()) + c, err = LoadFile("testdata/external_labels.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "fooTestValuebar", "foo", "TestValue", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels) } func TestAgentMode(t *testing.T) { - _, err := LoadFile("testdata/agent_mode.with_alert_manager.yml", true, false, log.NewNopLogger()) + _, err := LoadFile("testdata/agent_mode.with_alert_manager.yml", true, promslog.NewNopLogger()) require.ErrorContains(t, err, "field alerting is not allowed in agent mode") - _, err = LoadFile("testdata/agent_mode.with_alert_relabels.yml", true, false, log.NewNopLogger()) + _, err = LoadFile("testdata/agent_mode.with_alert_relabels.yml", true, promslog.NewNopLogger()) require.ErrorContains(t, err, "field alerting is not allowed in agent mode") - _, err = LoadFile("testdata/agent_mode.with_rule_files.yml", true, false, log.NewNopLogger()) + _, err = LoadFile("testdata/agent_mode.with_rule_files.yml", true, promslog.NewNopLogger()) require.ErrorContains(t, err, "field rule_files is not allowed in agent mode") - _, err = LoadFile("testdata/agent_mode.with_remote_reads.yml", true, false, log.NewNopLogger()) + _, err = LoadFile("testdata/agent_mode.with_remote_reads.yml", true, promslog.NewNopLogger()) require.ErrorContains(t, err, "field remote_read is not allowed in agent mode") - c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, false, log.NewNopLogger()) + c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, promslog.NewNopLogger()) require.NoError(t, err) require.Empty(t, c.RemoteWriteConfigs) - c, err = LoadFile("testdata/agent_mode.good.yml", true, false, log.NewNopLogger()) + c, err = LoadFile("testdata/agent_mode.good.yml", true, promslog.NewNopLogger()) require.NoError(t, err) require.Len(t, c.RemoteWriteConfigs, 1) require.Equal( @@ -2161,7 +2250,7 @@ func TestAgentMode(t *testing.T) { } func TestEmptyGlobalBlock(t *testing.T) { - c, err := Load("global:\n", false, log.NewNopLogger()) + c, err := Load("global:\n", promslog.NewNopLogger()) require.NoError(t, err) exp := DefaultConfig exp.Runtime = DefaultRuntimeConfig @@ -2316,7 +2405,7 @@ func TestGetScrapeConfigs(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - c, err := LoadFile(tc.configFile, false, false, log.NewNopLogger()) + c, err := LoadFile(tc.configFile, false, promslog.NewNopLogger()) require.NoError(t, err) scfgs, err := c.GetScrapeConfigs() @@ -2334,7 +2423,7 @@ func kubernetesSDHostURL() config.URL { } func TestScrapeConfigDisableCompression(t *testing.T) { - want, err := LoadFile("testdata/scrape_config_disable_compression.good.yml", 
false, false, log.NewNopLogger()) + want, err := LoadFile("testdata/scrape_config_disable_compression.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) out, err := yaml.Marshal(want) @@ -2365,23 +2454,23 @@ func TestScrapeConfigNameValidationSettings(t *testing.T) { { name: "global setting implies local settings", inputFile: "scrape_config_global_validation_mode", - expectScheme: "utf8", + expectScheme: "legacy", }, { name: "local setting", inputFile: "scrape_config_local_validation_mode", - expectScheme: "utf8", + expectScheme: "legacy", }, { name: "local setting overrides global setting", inputFile: "scrape_config_local_global_validation_mode", - expectScheme: "legacy", + expectScheme: "utf8", }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, false, log.NewNopLogger()) + want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, promslog.NewNopLogger()) require.NoError(t, err) out, err := yaml.Marshal(want) @@ -2394,3 +2483,54 @@ func TestScrapeConfigNameValidationSettings(t *testing.T) { }) } } + +func TestScrapeProtocolHeader(t *testing.T) { + tests := []struct { + name string + proto ScrapeProtocol + expectedValue string + }{ + { + name: "blank", + proto: ScrapeProtocol(""), + expectedValue: "", + }, + { + name: "invalid", + proto: ScrapeProtocol("invalid"), + expectedValue: "", + }, + { + name: "prometheus protobuf", + proto: PrometheusProto, + expectedValue: "application/vnd.google.protobuf", + }, + { + name: "prometheus text 0.0.4", + proto: PrometheusText0_0_4, + expectedValue: "text/plain", + }, + { + name: "prometheus text 1.0.0", + proto: PrometheusText1_0_0, + expectedValue: "text/plain", + }, + { + name: "openmetrics 0.0.1", + proto: OpenMetricsText0_0_1, + expectedValue: "application/openmetrics-text", + }, + { + name: "openmetrics 1.0.0", + proto: OpenMetricsText1_0_0, + expectedValue: "application/openmetrics-text", + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mediaType := tc.proto.HeaderMediaType() + + require.Equal(t, tc.expectedValue, mediaType) + }) + } +} diff --git a/config/reload.go b/config/reload.go new file mode 100644 index 00000000000..8be1b28d8ab --- /dev/null +++ b/config/reload.go @@ -0,0 +1,92 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "os" + "path/filepath" + + "gopkg.in/yaml.v2" +) + +type ExternalFilesConfig struct { + RuleFiles []string `yaml:"rule_files"` + ScrapeConfigFiles []string `yaml:"scrape_config_files"` +} + +// GenerateChecksum generates a checksum of the YAML file and the files it references. 
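The reload helper defined just below can be driven roughly like this; the path is a placeholder, and the referenced rule and scrape-config files are resolved relative to it:

    package main

    import (
        "fmt"
        "log"

        "github.com/prometheus/prometheus/config"
    )

    func main() {
        // GenerateChecksum hashes the main file plus every rule_files and
        // scrape_config_files entry it references, so editing any of those
        // files changes the result. "prometheus.yml" is a placeholder path.
        sum, err := config.GenerateChecksum("prometheus.yml")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(sum) // hex-encoded SHA-256
    }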
+func GenerateChecksum(yamlFilePath string) (string, error) { + hash := sha256.New() + + yamlContent, err := os.ReadFile(yamlFilePath) + if err != nil { + return "", fmt.Errorf("error reading YAML file: %w", err) + } + _, err = hash.Write(yamlContent) + if err != nil { + return "", fmt.Errorf("error writing YAML file to hash: %w", err) + } + + var config ExternalFilesConfig + if err := yaml.Unmarshal(yamlContent, &config); err != nil { + return "", fmt.Errorf("error unmarshalling YAML: %w", err) + } + + dir := filepath.Dir(yamlFilePath) + + for i, file := range config.RuleFiles { + config.RuleFiles[i] = filepath.Join(dir, file) + } + for i, file := range config.ScrapeConfigFiles { + config.ScrapeConfigFiles[i] = filepath.Join(dir, file) + } + + files := map[string][]string{ + "r": config.RuleFiles, // "r" for rule files + "s": config.ScrapeConfigFiles, // "s" for scrape config files + } + + for _, prefix := range []string{"r", "s"} { + for _, pattern := range files[prefix] { + matchingFiles, err := filepath.Glob(pattern) + if err != nil { + return "", fmt.Errorf("error finding files with pattern %q: %w", pattern, err) + } + + for _, file := range matchingFiles { + // Write prefix to the hash ("r" or "s") followed by \0, then + // the file path. + _, err = hash.Write([]byte(prefix + "\x00" + file + "\x00")) + if err != nil { + return "", fmt.Errorf("error writing %q path to hash: %w", file, err) + } + + // Read and hash the content of the file. + content, err := os.ReadFile(file) + if err != nil { + return "", fmt.Errorf("error reading file %s: %w", file, err) + } + _, err = hash.Write(append(content, []byte("\x00")...)) + if err != nil { + return "", fmt.Errorf("error writing %q content to hash: %w", file, err) + } + } + } + } + + return hex.EncodeToString(hash.Sum(nil)), nil +} diff --git a/config/reload_test.go b/config/reload_test.go new file mode 100644 index 00000000000..f0f44f35880 --- /dev/null +++ b/config/reload_test.go @@ -0,0 +1,222 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGenerateChecksum(t *testing.T) { + tmpDir := t.TempDir() + + // Define paths for the temporary files. + yamlFilePath := filepath.Join(tmpDir, "test.yml") + ruleFilePath := filepath.Join(tmpDir, "rule_file.yml") + scrapeConfigFilePath := filepath.Join(tmpDir, "scrape_config.yml") + + // Define initial and modified content for the files. + originalRuleContent := "groups:\n- name: example\n rules:\n - alert: ExampleAlert" + modifiedRuleContent := "groups:\n- name: example\n rules:\n - alert: ModifiedAlert" + + originalScrapeConfigContent := "scrape_configs:\n- job_name: example" + modifiedScrapeConfigContent := "scrape_configs:\n- job_name: modified_example" + + // Define YAML content referencing the rule and scrape config files. 
+ yamlContent := ` +rule_files: + - rule_file.yml +scrape_config_files: + - scrape_config.yml +` + + // Write initial content to files. + require.NoError(t, os.WriteFile(ruleFilePath, []byte(originalRuleContent), 0o644)) + require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(originalScrapeConfigContent), 0o644)) + require.NoError(t, os.WriteFile(yamlFilePath, []byte(yamlContent), 0o644)) + + // Generate the original checksum. + originalChecksum := calculateChecksum(t, yamlFilePath) + + t.Run("Rule File Change", func(t *testing.T) { + // Modify the rule file. + require.NoError(t, os.WriteFile(ruleFilePath, []byte(modifiedRuleContent), 0o644)) + + // Checksum should change. + modifiedChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, modifiedChecksum) + + // Revert the rule file. + require.NoError(t, os.WriteFile(ruleFilePath, []byte(originalRuleContent), 0o644)) + + // Checksum should return to the original. + revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + + t.Run("Scrape Config Change", func(t *testing.T) { + // Modify the scrape config file. + require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(modifiedScrapeConfigContent), 0o644)) + + // Checksum should change. + modifiedChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, modifiedChecksum) + + // Revert the scrape config file. + require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(originalScrapeConfigContent), 0o644)) + + // Checksum should return to the original. + revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + + t.Run("Rule File Deletion", func(t *testing.T) { + // Delete the rule file. + require.NoError(t, os.Remove(ruleFilePath)) + + // Checksum should change. + deletedChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, deletedChecksum) + + // Restore the rule file. + require.NoError(t, os.WriteFile(ruleFilePath, []byte(originalRuleContent), 0o644)) + + // Checksum should return to the original. + revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + + t.Run("Scrape Config Deletion", func(t *testing.T) { + // Delete the scrape config file. + require.NoError(t, os.Remove(scrapeConfigFilePath)) + + // Checksum should change. + deletedChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, deletedChecksum) + + // Restore the scrape config file. + require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(originalScrapeConfigContent), 0o644)) + + // Checksum should return to the original. + revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + + t.Run("Main File Change", func(t *testing.T) { + // Modify the main YAML file. + modifiedYamlContent := ` +global: + scrape_interval: 3s +rule_files: + - rule_file.yml +scrape_config_files: + - scrape_config.yml +` + require.NoError(t, os.WriteFile(yamlFilePath, []byte(modifiedYamlContent), 0o644)) + + // Checksum should change. + modifiedChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, modifiedChecksum) + + // Revert the main YAML file. + require.NoError(t, os.WriteFile(yamlFilePath, []byte(yamlContent), 0o644)) + + // Checksum should return to the original. 
+ revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + + t.Run("Rule File Removed from YAML Config", func(t *testing.T) { + // Modify the YAML content to remove the rule file. + modifiedYamlContent := ` +scrape_config_files: + - scrape_config.yml +` + require.NoError(t, os.WriteFile(yamlFilePath, []byte(modifiedYamlContent), 0o644)) + + // Checksum should change. + modifiedChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, modifiedChecksum) + + // Revert the YAML content. + require.NoError(t, os.WriteFile(yamlFilePath, []byte(yamlContent), 0o644)) + + // Checksum should return to the original. + revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + + t.Run("Scrape Config Removed from YAML Config", func(t *testing.T) { + // Modify the YAML content to remove the scrape config file. + modifiedYamlContent := ` +rule_files: + - rule_file.yml +` + require.NoError(t, os.WriteFile(yamlFilePath, []byte(modifiedYamlContent), 0o644)) + + // Checksum should change. + modifiedChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, modifiedChecksum) + + // Revert the YAML content. + require.NoError(t, os.WriteFile(yamlFilePath, []byte(yamlContent), 0o644)) + + // Checksum should return to the original. + revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + + t.Run("Empty Rule File", func(t *testing.T) { + // Write an empty rule file. + require.NoError(t, os.WriteFile(ruleFilePath, []byte(""), 0o644)) + + // Checksum should change. + emptyChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, emptyChecksum) + + // Restore the rule file. + require.NoError(t, os.WriteFile(ruleFilePath, []byte(originalRuleContent), 0o644)) + + // Checksum should return to the original. + revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + + t.Run("Empty Scrape Config File", func(t *testing.T) { + // Write an empty scrape config file. + require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(""), 0o644)) + + // Checksum should change. + emptyChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, emptyChecksum) + + // Restore the scrape config file. + require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(originalScrapeConfigContent), 0o644)) + + // Checksum should return to the original. + revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) +} + +// calculateChecksum generates a checksum for the given YAML file path. +func calculateChecksum(t *testing.T, yamlFilePath string) string { + checksum, err := GenerateChecksum(yamlFilePath) + require.NoError(t, err) + require.NotEmpty(t, checksum) + return checksum +} diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml index 9eb79954325..2501652d5b9 100644 --- a/config/testdata/conf.good.yml +++ b/config/testdata/conf.good.yml @@ -74,6 +74,8 @@ scrape_configs: # metrics_path defaults to '/metrics' # scheme defaults to 'http'. 
+ fallback_scrape_protocol: PrometheusText0.0.4 + scrape_failure_log_file: fail_prom.log file_sd_configs: - files: diff --git a/config/testdata/config_with_deprecated_am_api_config.yml b/config/testdata/config_with_deprecated_am_api_config.yml new file mode 100644 index 00000000000..ac89537ff1b --- /dev/null +++ b/config/testdata/config_with_deprecated_am_api_config.yml @@ -0,0 +1,7 @@ +alerting: + alertmanagers: + - scheme: http + api_version: v1 + file_sd_configs: + - files: + - nonexistent_file.yml diff --git a/config/testdata/jobname_dup.bad.yml b/config/testdata/jobname_dup.bad.yml index 0265493c30c..d03cb0cf979 100644 --- a/config/testdata/jobname_dup.bad.yml +++ b/config/testdata/jobname_dup.bad.yml @@ -1,4 +1,6 @@ # Two scrape configs with the same job names are not allowed. +global: + metric_name_validation_scheme: legacy scrape_configs: - job_name: prometheus - job_name: service-x diff --git a/config/testdata/lowercase.bad.yml b/config/testdata/lowercase.bad.yml index 9bc95833417..6dd72e6476a 100644 --- a/config/testdata/lowercase.bad.yml +++ b/config/testdata/lowercase.bad.yml @@ -1,3 +1,5 @@ +global: + metric_name_validation_scheme: legacy scrape_configs: - job_name: prometheus relabel_configs: diff --git a/config/testdata/otlp_allow_utf8.bad.yml b/config/testdata/otlp_allow_utf8.bad.yml new file mode 100644 index 00000000000..488e4b05584 --- /dev/null +++ b/config/testdata/otlp_allow_utf8.bad.yml @@ -0,0 +1,4 @@ +global: + metric_name_validation_scheme: legacy +otlp: + translation_strategy: Invalid diff --git a/config/testdata/otlp_allow_utf8.good.yml b/config/testdata/otlp_allow_utf8.good.yml new file mode 100644 index 00000000000..f3069d2fddb --- /dev/null +++ b/config/testdata/otlp_allow_utf8.good.yml @@ -0,0 +1,2 @@ +otlp: + translation_strategy: NoUTF8EscapingWithSuffixes diff --git a/config/testdata/otlp_allow_utf8.incompatible.yml b/config/testdata/otlp_allow_utf8.incompatible.yml new file mode 100644 index 00000000000..2625c24131e --- /dev/null +++ b/config/testdata/otlp_allow_utf8.incompatible.yml @@ -0,0 +1,4 @@ +global: + metric_name_validation_scheme: legacy +otlp: + translation_strategy: NoUTF8EscapingWithSuffixes diff --git a/config/testdata/scrape_config_files_fallback_scrape_protocol1.bad.yml b/config/testdata/scrape_config_files_fallback_scrape_protocol1.bad.yml new file mode 100644 index 00000000000..07cfe47594e --- /dev/null +++ b/config/testdata/scrape_config_files_fallback_scrape_protocol1.bad.yml @@ -0,0 +1,5 @@ +scrape_configs: + - job_name: node + fallback_scrape_protocol: "prometheusproto" + static_configs: + - targets: ['localhost:8080'] diff --git a/config/testdata/scrape_config_files_fallback_scrape_protocol2.bad.yml b/config/testdata/scrape_config_files_fallback_scrape_protocol2.bad.yml new file mode 100644 index 00000000000..c5d133f9c46 --- /dev/null +++ b/config/testdata/scrape_config_files_fallback_scrape_protocol2.bad.yml @@ -0,0 +1,5 @@ +scrape_configs: + - job_name: node + fallback_scrape_protocol: ["OpenMetricsText1.0.0", "PrometheusText0.0.4"] + static_configs: + - targets: ['localhost:8080'] diff --git a/config/testdata/scrape_config_global_validation_mode.yml b/config/testdata/scrape_config_global_validation_mode.yml index 1548554397c..e9b0618c709 100644 --- a/config/testdata/scrape_config_global_validation_mode.yml +++ b/config/testdata/scrape_config_global_validation_mode.yml @@ -1,4 +1,4 @@ global: - metric_name_validation_scheme: utf8 + metric_name_validation_scheme: legacy scrape_configs: - job_name: prometheus diff --git 
a/config/testdata/scrape_config_local_global_validation_mode.yml b/config/testdata/scrape_config_local_global_validation_mode.yml index d13605e21d9..30b54834a56 100644 --- a/config/testdata/scrape_config_local_global_validation_mode.yml +++ b/config/testdata/scrape_config_local_global_validation_mode.yml @@ -1,5 +1,5 @@ global: - metric_name_validation_scheme: utf8 + metric_name_validation_scheme: legacy scrape_configs: - job_name: prometheus - metric_name_validation_scheme: legacy + metric_name_validation_scheme: utf8 diff --git a/config/testdata/scrape_config_local_validation_mode.yml b/config/testdata/scrape_config_local_validation_mode.yml index fad4235806a..90279ff0818 100644 --- a/config/testdata/scrape_config_local_validation_mode.yml +++ b/config/testdata/scrape_config_local_validation_mode.yml @@ -1,3 +1,3 @@ scrape_configs: - job_name: prometheus - metric_name_validation_scheme: utf8 + metric_name_validation_scheme: legacy diff --git a/console_libraries/menu.lib b/console_libraries/menu.lib deleted file mode 100644 index 199ebf9f480..00000000000 --- a/console_libraries/menu.lib +++ /dev/null @@ -1,82 +0,0 @@ -{{/* vim: set ft=html: */}} - -{{/* Navbar, should be passed . */}} -{{ define "navbar" }} - -{{ end }} - -{{/* LHS menu, should be passed . */}} -{{ define "menu" }} -
- -
-{{ end }} - -{{/* Helper, pass (args . path name) */}} -{{ define "_menuItem" }} - -{{ end }} - diff --git a/console_libraries/prom.lib b/console_libraries/prom.lib deleted file mode 100644 index d7d436f9474..00000000000 --- a/console_libraries/prom.lib +++ /dev/null @@ -1,138 +0,0 @@ -{{/* vim: set ft=html: */}} -{{/* Load Prometheus console library JS/CSS. Should go in */}} -{{ define "prom_console_head" }} - - - - - - - - - - - - - -{{ end }} - -{{/* Top of all pages. */}} -{{ define "head" -}} - - - -{{ template "prom_console_head" }} - - -{{ template "navbar" . }} - -{{ template "menu" . }} -{{ end }} - -{{ define "__prom_query_drilldown_noop" }}{{ . }}{{ end }} -{{ define "humanize" }}{{ humanize . }}{{ end }} -{{ define "humanizeNoSmallPrefix" }}{{ if and (lt . 1.0) (gt . -1.0) }}{{ printf "%.3g" . }}{{ else }}{{ humanize . }}{{ end }}{{ end }} -{{ define "humanize1024" }}{{ humanize1024 . }}{{ end }} -{{ define "humanizeDuration" }}{{ humanizeDuration . }}{{ end }} -{{ define "humanizePercentage" }}{{ humanizePercentage . }}{{ end }} -{{ define "humanizeTimestamp" }}{{ humanizeTimestamp . }}{{ end }} -{{ define "printf.1f" }}{{ printf "%.1f" . }}{{ end }} -{{ define "printf.3g" }}{{ printf "%.3g" . }}{{ end }} - -{{/* prom_query_drilldown (args expr suffix? renderTemplate?) -Displays the result of the expression, with a link to /graph for it. - -renderTemplate is the name of the template to use to render the value. -*/}} -{{ define "prom_query_drilldown" }} -{{ $expr := .arg0 }}{{ $suffix := (or .arg1 "") }}{{ $renderTemplate := (or .arg2 "__prom_query_drilldown_noop") }} -{{ with query $expr }}{{tmpl $renderTemplate ( . | first | value )}}{{ $suffix }}{{ else }}-{{ end }} -{{ end }} - -{{ define "prom_path" }}/consoles/{{ .Path }}?{{ range $param, $value := .Params }}{{ $param }}={{ $value }}&{{ end }}{{ end }}" - -{{ define "prom_right_table_head" }} -
- -{{ end }} -{{ define "prom_right_table_tail" }} -
-
-{{ end }} - -{{/* RHS table head, pass job name. Should be used after prom_right_table_head. */}} -{{ define "prom_right_table_job_head" }} - - {{ . }} - {{ template "prom_query_drilldown" (args (printf "sum(up{job='%s'})" .)) }} / {{ template "prom_query_drilldown" (args (printf "count(up{job='%s'})" .)) }} - - - CPU - {{ template "prom_query_drilldown" (args (printf "avg by(job)(irate(process_cpu_seconds_total{job='%s'}[5m]))" .) "s/s" "humanizeNoSmallPrefix") }} - - - Memory - {{ template "prom_query_drilldown" (args (printf "avg by(job)(process_resident_memory_bytes{job='%s'})" .) "B" "humanize1024") }} - -{{ end }} - - -{{ define "prom_content_head" }} -
-
-{{ template "prom_graph_timecontrol" . }} -{{ end }} -{{ define "prom_content_tail" }} -
-
-{{ end }} - -{{ define "prom_graph_timecontrol" }} -
-
-
- -
-
- -
-
-
- - - -
-
-
- -
-{{ end }} - -{{/* Bottom of all pages. */}} -{{ define "tail" }} - - -{{ end }} diff --git a/consoles/index.html.example b/consoles/index.html.example deleted file mode 100644 index c725d30dea3..00000000000 --- a/consoles/index.html.example +++ /dev/null @@ -1,28 +0,0 @@ -{{ template "head" . }} - -{{ template "prom_right_table_head" }} -{{ template "prom_right_table_tail" }} - -{{ template "prom_content_head" . }} -

Overview

-

These are example consoles for Prometheus.

- -

These consoles expect exporters to have the following job labels:

- - - - - - - - - - - - - -
Exporter | Job label
Node Exporter | node
Prometheus | prometheus
- -{{ template "prom_content_tail" . }} - -{{ template "tail" }} diff --git a/consoles/node-cpu.html b/consoles/node-cpu.html deleted file mode 100644 index 284ad738f2b..00000000000 --- a/consoles/node-cpu.html +++ /dev/null @@ -1,60 +0,0 @@ -{{ template "head" . }} - -{{ template "prom_right_table_head" }} - - CPU(s): {{ template "prom_query_drilldown" (args (printf "scalar(count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'})))" .Params.instance)) }} - -{{ range printf "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='%s'}[5m])) * 100 / scalar(count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'})))" .Params.instance .Params.instance | query | sortByLabel "mode" }} - - {{ .Labels.mode | title }} CPU - {{ .Value | printf "%.1f" }}% - -{{ end }} - Misc - - Processes Running - {{ template "prom_query_drilldown" (args (printf "node_procs_running{job='node',instance='%s'}" .Params.instance) "" "humanize") }} - - - Processes Blocked - {{ template "prom_query_drilldown" (args (printf "node_procs_blocked{job='node',instance='%s'}" .Params.instance) "" "humanize") }} - - - Forks - {{ template "prom_query_drilldown" (args (printf "irate(node_forks_total{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }} - - - Context Switches - {{ template "prom_query_drilldown" (args (printf "irate(node_context_switches_total{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }} - - - Interrupts - {{ template "prom_query_drilldown" (args (printf "irate(node_intr_total{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }} - - - 1m Loadavg - {{ template "prom_query_drilldown" (args (printf "node_load1{job='node',instance='%s'}" .Params.instance)) }} - - - -{{ template "prom_right_table_tail" }} - -{{ template "prom_content_head" . }} -

Node CPU - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}

- -

CPU Usage

-
- -{{ template "prom_content_tail" . }} - -{{ template "tail" }} diff --git a/consoles/node-disk.html b/consoles/node-disk.html deleted file mode 100644 index ffff41b7978..00000000000 --- a/consoles/node-disk.html +++ /dev/null @@ -1,78 +0,0 @@ -{{ template "head" . }} - -{{ template "prom_right_table_head" }} - - Disks - -{{ range printf "node_disk_io_time_seconds_total{job='node',instance='%s'}" .Params.instance | query | sortByLabel "device" }} - {{ .Labels.device }} - - Utilization - {{ template "prom_query_drilldown" (args (printf "irate(node_disk_io_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) * 100" .Labels.instance .Labels.device) "%" "printf.1f") }} - - - Throughput - {{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_bytes_total{job='node',instance='%s',device='%s'}[5m]) + irate(node_disk_written_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "B/s" "humanize") }} - - - Avg Read Time - {{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) / irate(node_disk_reads_completed_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "s" "humanize") }} - - - Avg Write Time - {{ template "prom_query_drilldown" (args (printf "irate(node_disk_write_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) / irate(node_disk_writes_completed_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "s" "humanize") }} - -{{ end }} - - Filesystem Fullness - -{{ define "roughlyNearZero" }} -{{ if gt .1 . }}~0{{ else }}{{ printf "%.1f" . }}{{ end }} -{{ end }} -{{ range printf "node_filesystem_size_bytes{job='node',instance='%s'}" .Params.instance | query | sortByLabel "mountpoint" }} - - {{ .Labels.mountpoint }} - {{ template "prom_query_drilldown" (args (printf "100 - node_filesystem_avail_bytes{job='node',instance='%s',mountpoint='%s'} / node_filesystem_size_bytes{job='node'} * 100" .Labels.instance .Labels.mountpoint) "%" "roughlyNearZero") }} - -{{ end }} - - -{{ template "prom_right_table_tail" }} - -{{ template "prom_content_head" . }} -

Node Disk - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}

- -

Disk I/O Utilization

-
- -

Filesystem Usage

-
- -{{ template "prom_content_tail" . }} - -{{ template "tail" }} diff --git a/consoles/node-overview.html b/consoles/node-overview.html deleted file mode 100644 index 4ae8984b99a..00000000000 --- a/consoles/node-overview.html +++ /dev/null @@ -1,121 +0,0 @@ -{{ template "head" . }} - -{{ template "prom_right_table_head" }} - Overview - - User CPU - {{ template "prom_query_drilldown" (args (printf "sum(irate(node_cpu_seconds_total{job='node',instance='%s',mode='user'}[5m])) * 100 / count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }} - - - System CPU - {{ template "prom_query_drilldown" (args (printf "sum(irate(node_cpu_seconds_total{job='node',instance='%s',mode='system'}[5m])) * 100 / count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }} - - - Memory Total - {{ template "prom_query_drilldown" (args (printf "node_memory_MemTotal_bytes{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }} - - - Memory Free - {{ template "prom_query_drilldown" (args (printf "node_memory_MemFree_bytes{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }} - - - Network - -{{ range printf "node_network_receive_bytes_total{job='node',instance='%s',device!='lo'}" .Params.instance | query | sortByLabel "device" }} - - {{ .Labels.device }} Received - {{ template "prom_query_drilldown" (args (printf "irate(node_network_receive_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device) "B/s" "humanize") }} - - - {{ .Labels.device }} Transmitted - {{ template "prom_query_drilldown" (args (printf "irate(node_network_transmit_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device) "B/s" "humanize") }} - -{{ end }} - - Disks - -{{ range printf "node_disk_io_time_seconds_total{job='node',instance='%s',device!~'^(md\\\\d+$|dm-)'}" .Params.instance | query | sortByLabel "device" }} - - {{ .Labels.device }} Utilization - {{ template "prom_query_drilldown" (args (printf "irate(node_disk_io_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) * 100" .Labels.instance .Labels.device) "%" "printf.1f") }} - -{{ end }} -{{ range printf "node_disk_io_time_seconds_total{job='node',instance='%s'}" .Params.instance | query | sortByLabel "device" }} - - {{ .Labels.device }} Throughput - {{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_bytes_total{job='node',instance='%s',device='%s'}[5m]) + irate(node_disk_written_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "B/s" "humanize") }} - -{{ end }} - - Filesystem Fullness - -{{ define "roughlyNearZero" }} -{{ if gt .1 . }}~0{{ else }}{{ printf "%.1f" . }}{{ end }} -{{ end }} -{{ range printf "node_filesystem_size_bytes{job='node',instance='%s'}" .Params.instance | query | sortByLabel "mountpoint" }} - - {{ .Labels.mountpoint }} - {{ template "prom_query_drilldown" (args (printf "100 - node_filesystem_avail_bytes{job='node',instance='%s',mountpoint='%s'} / node_filesystem_size_bytes{job='node'} * 100" .Labels.instance .Labels.mountpoint) "%" "roughlyNearZero") }} - -{{ end }} - -{{ template "prom_right_table_tail" }} - -{{ template "prom_content_head" . }} -

Node Overview - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}

- -

CPU Usage

-
- - -

Disk I/O Utilization

-
- - -

Memory

-
- - -{{ template "prom_content_tail" . }} - -{{ template "tail" }} diff --git a/consoles/node.html b/consoles/node.html deleted file mode 100644 index c1dfc1a8913..00000000000 --- a/consoles/node.html +++ /dev/null @@ -1,35 +0,0 @@ -{{ template "head" . }} - -{{ template "prom_right_table_head" }} - - Node - {{ template "prom_query_drilldown" (args "sum(up{job='node'})") }} / {{ template "prom_query_drilldown" (args "count(up{job='node'})") }} - -{{ template "prom_right_table_tail" }} - -{{ template "prom_content_head" . }} -

Node

- - - - - - - - -{{ range query "up{job='node'}" | sortByLabel "instance" }} - - - Yes{{ else }} class="alert-danger">No{{ end }} - - - -{{ else }} - -{{ end }} -
Node | Up | CPU Used | Memory Available
{{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Labels.instance }}{{ template "prom_query_drilldown" (args (printf "100 * (1 - avg by(instance) (sum without(mode) (irate(node_cpu_seconds_total{job='node',mode=~'idle|iowait|steal',instance='%s'}[5m]))))" .Labels.instance) "%" "printf.1f") }}{{ template "prom_query_drilldown" (args (printf "node_memory_MemFree_bytes{job='node',instance='%s'} + node_memory_Cached_bytes{job='node',instance='%s'} + node_memory_Buffers_bytes{job='node',instance='%s'}" .Labels.instance .Labels.instance .Labels.instance) "B" "humanize1024") }}
No nodes found.
- - -{{ template "prom_content_tail" . }} - -{{ template "tail" }} diff --git a/consoles/prometheus-overview.html b/consoles/prometheus-overview.html deleted file mode 100644 index 08e027de066..00000000000 --- a/consoles/prometheus-overview.html +++ /dev/null @@ -1,96 +0,0 @@ -{{ template "head" . }} - -{{ template "prom_right_table_head" }} - - Overview - - - CPU - {{ template "prom_query_drilldown" (args (printf "irate(process_cpu_seconds_total{job='prometheus',instance='%s'}[5m])" .Params.instance) "s/s" "humanizeNoSmallPrefix") }} - - - Memory - {{ template "prom_query_drilldown" (args (printf "process_resident_memory_bytes{job='prometheus',instance='%s'}" .Params.instance) "B" "humanize1024") }} - - - Version - {{ with query (printf "prometheus_build_info{job='prometheus',instance='%s'}" .Params.instance) }}{{. | first | label "version"}}{{end}} - - - - Storage - - - Ingested Samples - {{ template "prom_query_drilldown" (args (printf "irate(prometheus_tsdb_head_samples_appended_total{job='prometheus',instance='%s'}[5m])" .Params.instance) "/s" "humanizeNoSmallPrefix") }} - - - Head Series - {{ template "prom_query_drilldown" (args (printf "prometheus_tsdb_head_series{job='prometheus',instance='%s'}" .Params.instance) "" "humanize") }} - - - Blocks Loaded - {{ template "prom_query_drilldown" (args (printf "prometheus_tsdb_blocks_loaded{job='prometheus',instance='%s'}" .Params.instance) "" "humanize") }} - - - Rules - - - Evaluation Duration - {{ template "prom_query_drilldown" (args (printf "irate(prometheus_evaluator_duration_seconds_sum{job='prometheus',instance='%s'}[5m]) / irate(prometheus_evaluator_duration_seconds_count{job='prometheus',instance='%s'}[5m])" .Params.instance .Params.instance) "" "humanizeDuration") }} - - - Notification Latency - {{ template "prom_query_drilldown" (args (printf "irate(prometheus_notifications_latency_seconds_sum{job='prometheus',instance='%s'}[5m]) / irate(prometheus_notifications_latency_seconds_count{job='prometheus',instance='%s'}[5m])" .Params.instance .Params.instance) "" "humanizeDuration") }} - - - Notification Queue - {{ template "prom_query_drilldown" (args (printf "prometheus_notifications_queue_length{job='prometheus',instance='%s'}" .Params.instance) "" "humanize") }} - - - HTTP Server - -{{ range printf "prometheus_http_request_duration_seconds_count{job='prometheus',instance='%s'}" .Params.instance | query | sortByLabel "handler" }} - - {{ .Labels.handler }} - {{ template "prom_query_drilldown" (args (printf "irate(prometheus_http_request_duration_seconds_count{job='prometheus',instance='%s',handler='%s'}[5m])" .Labels.instance .Labels.handler) "/s" "humanizeNoSmallPrefix") }} - -{{ end }} - -{{ template "prom_right_table_tail" }} - -{{ template "prom_content_head" . }} -
-

Prometheus Overview - {{ .Params.instance }}

- -

Ingested Samples

-
- - -

HTTP Server

-
- -
-{{ template "prom_content_tail" . }} - -{{ template "tail" }} diff --git a/consoles/prometheus.html b/consoles/prometheus.html deleted file mode 100644 index e0d026376d1..00000000000 --- a/consoles/prometheus.html +++ /dev/null @@ -1,34 +0,0 @@ -{{ template "head" . }} - -{{ template "prom_right_table_head" }} - - Prometheus - {{ template "prom_query_drilldown" (args "sum(up{job='prometheus'})") }} / {{ template "prom_query_drilldown" (args "count(up{job='prometheus'})") }} - -{{ template "prom_right_table_tail" }} - -{{ template "prom_content_head" . }} -

Prometheus

- - - - - - - - -{{ range query "up{job='prometheus'}" | sortByLabel "instance" }} - - - - - - -{{ else }} - -{{ end }} -
Prometheus | Up | Ingested Samples | Memory
{{ .Labels.instance }}Yes{{ else }} class="alert-danger">No{{ end }}{{ template "prom_query_drilldown" (args (printf "irate(prometheus_tsdb_head_samples_appended_total{job='prometheus',instance='%s'}[5m])" .Labels.instance) "/s" "humanizeNoSmallPrefix") }}{{ template "prom_query_drilldown" (args (printf "process_resident_memory_bytes{job='prometheus',instance='%s'}" .Labels.instance) "B" "humanize1024")}}
No devices found.
- -{{ template "prom_content_tail" . }} - -{{ template "tail" }} diff --git a/discovery/README.md b/discovery/README.md index 4c066086256..d5418e7fb11 100644 --- a/discovery/README.md +++ b/discovery/README.md @@ -233,7 +233,7 @@ type Config interface { } type DiscovererOptions struct { - Logger log.Logger + Logger *slog.Logger // A registerer for the Discoverer's metrics. Registerer prometheus.Registerer diff --git a/discovery/aws/ec2.go b/discovery/aws/ec2.go index a44912481a8..5a725cb48f3 100644 --- a/discovery/aws/ec2.go +++ b/discovery/aws/ec2.go @@ -17,6 +17,7 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" "strings" @@ -29,11 +30,11 @@ import ( "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/go-kit/log" - "github.com/go-kit/log/level" + "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" @@ -146,9 +147,9 @@ func (c *EC2SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { // the Discoverer interface. type EC2Discovery struct { *refresh.Discovery - logger log.Logger + logger *slog.Logger cfg *EC2SDConfig - ec2 *ec2.EC2 + ec2 ec2iface.EC2API // azToAZID maps this account's availability zones to their underlying AZ // ID, e.g. eu-west-2a -> euw2-az2. Refreshes are performed sequentially, so @@ -157,14 +158,14 @@ type EC2Discovery struct { } // NewEC2Discovery returns a new EC2Discovery which periodically refreshes its targets. -func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) { +func NewEC2Discovery(conf *EC2SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) { m, ok := metrics.(*ec2Metrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } d := &EC2Discovery{ logger: logger, @@ -182,7 +183,7 @@ func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger, metrics discovery.Dis return d, nil } -func (d *EC2Discovery) ec2Client(context.Context) (*ec2.EC2, error) { +func (d *EC2Discovery) ec2Client(context.Context) (ec2iface.EC2API, error) { if d.ec2 != nil { return d.ec2, nil } @@ -254,8 +255,8 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error // Prometheus requires a reload if AWS adds a new AZ to the region. 
if d.azToAZID == nil { if err := d.refreshAZIDs(ctx); err != nil { - level.Debug(d.logger).Log( - "msg", "Unable to describe availability zones", + d.logger.Debug( + "Unable to describe availability zones", "err", err) } } @@ -296,8 +297,8 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error labels[ec2LabelAZ] = model.LabelValue(*inst.Placement.AvailabilityZone) azID, ok := d.azToAZID[*inst.Placement.AvailabilityZone] if !ok && d.azToAZID != nil { - level.Debug(d.logger).Log( - "msg", "Availability zone ID not found", + d.logger.Debug( + "Availability zone ID not found", "az", *inst.Placement.AvailabilityZone) } labels[ec2LabelAZID] = model.LabelValue(azID) diff --git a/discovery/aws/ec2_test.go b/discovery/aws/ec2_test.go new file mode 100644 index 00000000000..f34065c23e1 --- /dev/null +++ b/discovery/aws/ec2_test.go @@ -0,0 +1,434 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aws + +import ( + "context" + "errors" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2/ec2iface" + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + "go.uber.org/goleak" + + "github.com/prometheus/prometheus/discovery/targetgroup" +) + +// Helper function to get pointers on literals. +// NOTE: this is common between a few tests. In the future it might worth to move this out into a separate package. +func strptr(str string) *string { + return &str +} + +func boolptr(b bool) *bool { + return &b +} + +func int64ptr(i int64) *int64 { + return &i +} + +// Struct for test data. +type ec2DataStore struct { + region string + + azToAZID map[string]string + + ownerID string + + instances []*ec2.Instance +} + +// The tests itself. 
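A note on the logging change that runs through this patch: discoverers now accept a *slog.Logger instead of go-kit's log.Logger, leveled key/value calls become structured slog calls, and promslog.NewNopLogger() replaces log.NewNopLogger() as the nil fallback. A minimal before/after sketch (not part of the patch; the message string is borrowed from the ec2.go hunk above):

package example

import (
	"log/slog"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"

	"github.com/prometheus/common/promslog"
)

// Old pattern, removed throughout this diff: leveled key/value logging via go-kit.
func logWithGoKit(logger log.Logger, err error) {
	level.Debug(logger).Log("msg", "Unable to describe availability zones", "err", err)
}

// New pattern, added throughout this diff: structured logging via log/slog,
// with a no-op logger substituted when none is supplied.
func logWithSlog(logger *slog.Logger, err error) {
	if logger == nil {
		logger = promslog.NewNopLogger()
	}
	logger.Debug("Unable to describe availability zones", "err", err)
}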
+func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} + +func TestEC2DiscoveryRefreshAZIDs(t *testing.T) { + ctx := context.Background() + + // iterate through the test cases + for _, tt := range []struct { + name string + shouldFail bool + ec2Data *ec2DataStore + }{ + { + name: "Normal", + shouldFail: false, + ec2Data: &ec2DataStore{ + azToAZID: map[string]string{ + "azname-a": "azid-1", + "azname-b": "azid-2", + "azname-c": "azid-3", + }, + }, + }, + { + name: "HandleError", + shouldFail: true, + ec2Data: &ec2DataStore{}, + }, + } { + t.Run(tt.name, func(t *testing.T) { + client := newMockEC2Client(tt.ec2Data) + + d := &EC2Discovery{ + ec2: client, + } + + err := d.refreshAZIDs(ctx) + if tt.shouldFail { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, client.ec2Data.azToAZID, d.azToAZID) + } + }) + } +} + +func TestEC2DiscoveryRefresh(t *testing.T) { + ctx := context.Background() + + // iterate through the test cases + for _, tt := range []struct { + name string + ec2Data *ec2DataStore + expected []*targetgroup.Group + }{ + { + name: "NoPrivateIp", + ec2Data: &ec2DataStore{ + region: "region-noprivateip", + azToAZID: map[string]string{ + "azname-a": "azid-1", + "azname-b": "azid-2", + "azname-c": "azid-3", + }, + instances: []*ec2.Instance{ + { + InstanceId: strptr("instance-id-noprivateip"), + }, + }, + }, + expected: []*targetgroup.Group{ + { + Source: "region-noprivateip", + }, + }, + }, + { + name: "NoVpc", + ec2Data: &ec2DataStore{ + region: "region-novpc", + azToAZID: map[string]string{ + "azname-a": "azid-1", + "azname-b": "azid-2", + "azname-c": "azid-3", + }, + ownerID: "owner-id-novpc", + instances: []*ec2.Instance{ + { + // set every possible options and test them here + Architecture: strptr("architecture-novpc"), + ImageId: strptr("ami-novpc"), + InstanceId: strptr("instance-id-novpc"), + InstanceLifecycle: strptr("instance-lifecycle-novpc"), + InstanceType: strptr("instance-type-novpc"), + Placement: &ec2.Placement{AvailabilityZone: strptr("azname-b")}, + Platform: strptr("platform-novpc"), + PrivateDnsName: strptr("private-dns-novpc"), + PrivateIpAddress: strptr("1.2.3.4"), + PublicDnsName: strptr("public-dns-novpc"), + PublicIpAddress: strptr("42.42.42.2"), + State: &ec2.InstanceState{Name: strptr("running")}, + // test tags once and for all + Tags: []*ec2.Tag{ + {Key: strptr("tag-1-key"), Value: strptr("tag-1-value")}, + {Key: strptr("tag-2-key"), Value: strptr("tag-2-value")}, + nil, + {Value: strptr("tag-4-value")}, + {Key: strptr("tag-5-key")}, + }, + }, + }, + }, + expected: []*targetgroup.Group{ + { + Source: "region-novpc", + Targets: []model.LabelSet{ + { + "__address__": model.LabelValue("1.2.3.4:4242"), + "__meta_ec2_ami": model.LabelValue("ami-novpc"), + "__meta_ec2_architecture": model.LabelValue("architecture-novpc"), + "__meta_ec2_availability_zone": model.LabelValue("azname-b"), + "__meta_ec2_availability_zone_id": model.LabelValue("azid-2"), + "__meta_ec2_instance_id": model.LabelValue("instance-id-novpc"), + "__meta_ec2_instance_lifecycle": model.LabelValue("instance-lifecycle-novpc"), + "__meta_ec2_instance_type": model.LabelValue("instance-type-novpc"), + "__meta_ec2_instance_state": model.LabelValue("running"), + "__meta_ec2_owner_id": model.LabelValue("owner-id-novpc"), + "__meta_ec2_platform": model.LabelValue("platform-novpc"), + "__meta_ec2_private_dns_name": model.LabelValue("private-dns-novpc"), + "__meta_ec2_private_ip": model.LabelValue("1.2.3.4"), + "__meta_ec2_public_dns_name": 
model.LabelValue("public-dns-novpc"), + "__meta_ec2_public_ip": model.LabelValue("42.42.42.2"), + "__meta_ec2_region": model.LabelValue("region-novpc"), + "__meta_ec2_tag_tag_1_key": model.LabelValue("tag-1-value"), + "__meta_ec2_tag_tag_2_key": model.LabelValue("tag-2-value"), + }, + }, + }, + }, + }, + { + name: "Ipv4", + ec2Data: &ec2DataStore{ + region: "region-ipv4", + azToAZID: map[string]string{ + "azname-a": "azid-1", + "azname-b": "azid-2", + "azname-c": "azid-3", + }, + instances: []*ec2.Instance{ + { + // just the minimum needed for the refresh work + ImageId: strptr("ami-ipv4"), + InstanceId: strptr("instance-id-ipv4"), + InstanceType: strptr("instance-type-ipv4"), + Placement: &ec2.Placement{AvailabilityZone: strptr("azname-c")}, + PrivateIpAddress: strptr("5.6.7.8"), + State: &ec2.InstanceState{Name: strptr("running")}, + SubnetId: strptr("azid-3"), + VpcId: strptr("vpc-ipv4"), + // network intefaces + NetworkInterfaces: []*ec2.InstanceNetworkInterface{ + // interface without subnet -> should be ignored + { + Ipv6Addresses: []*ec2.InstanceIpv6Address{ + { + Ipv6Address: strptr("2001:db8:1::1"), + IsPrimaryIpv6: boolptr(true), + }, + }, + }, + // interface with subnet, no IPv6 + { + Ipv6Addresses: []*ec2.InstanceIpv6Address{}, + SubnetId: strptr("azid-3"), + }, + // interface with another subnet, no IPv6 + { + Ipv6Addresses: []*ec2.InstanceIpv6Address{}, + SubnetId: strptr("azid-1"), + }, + }, + }, + }, + }, + expected: []*targetgroup.Group{ + { + Source: "region-ipv4", + Targets: []model.LabelSet{ + { + "__address__": model.LabelValue("5.6.7.8:4242"), + "__meta_ec2_ami": model.LabelValue("ami-ipv4"), + "__meta_ec2_availability_zone": model.LabelValue("azname-c"), + "__meta_ec2_availability_zone_id": model.LabelValue("azid-3"), + "__meta_ec2_instance_id": model.LabelValue("instance-id-ipv4"), + "__meta_ec2_instance_state": model.LabelValue("running"), + "__meta_ec2_instance_type": model.LabelValue("instance-type-ipv4"), + "__meta_ec2_owner_id": model.LabelValue(""), + "__meta_ec2_primary_subnet_id": model.LabelValue("azid-3"), + "__meta_ec2_private_ip": model.LabelValue("5.6.7.8"), + "__meta_ec2_region": model.LabelValue("region-ipv4"), + "__meta_ec2_subnet_id": model.LabelValue(",azid-3,azid-1,"), + "__meta_ec2_vpc_id": model.LabelValue("vpc-ipv4"), + }, + }, + }, + }, + }, + { + name: "Ipv6", + ec2Data: &ec2DataStore{ + region: "region-ipv6", + azToAZID: map[string]string{ + "azname-a": "azid-1", + "azname-b": "azid-2", + "azname-c": "azid-3", + }, + instances: []*ec2.Instance{ + { + // just the minimum needed for the refresh work + ImageId: strptr("ami-ipv6"), + InstanceId: strptr("instance-id-ipv6"), + InstanceType: strptr("instance-type-ipv6"), + Placement: &ec2.Placement{AvailabilityZone: strptr("azname-b")}, + PrivateIpAddress: strptr("9.10.11.12"), + State: &ec2.InstanceState{Name: strptr("running")}, + SubnetId: strptr("azid-2"), + VpcId: strptr("vpc-ipv6"), + // network intefaces + NetworkInterfaces: []*ec2.InstanceNetworkInterface{ + // interface without primary IPv6, index 2 + { + Attachment: &ec2.InstanceNetworkInterfaceAttachment{ + DeviceIndex: int64ptr(3), + }, + Ipv6Addresses: []*ec2.InstanceIpv6Address{ + { + Ipv6Address: strptr("2001:db8:2::1:1"), + IsPrimaryIpv6: boolptr(false), + }, + }, + SubnetId: strptr("azid-2"), + }, + // interface with primary IPv6, index 1 + { + Attachment: &ec2.InstanceNetworkInterfaceAttachment{ + DeviceIndex: int64ptr(1), + }, + Ipv6Addresses: []*ec2.InstanceIpv6Address{ + { + Ipv6Address: strptr("2001:db8:2::2:1"), + 
IsPrimaryIpv6: boolptr(false), + }, + { + Ipv6Address: strptr("2001:db8:2::2:2"), + IsPrimaryIpv6: boolptr(true), + }, + }, + SubnetId: strptr("azid-2"), + }, + // interface with primary IPv6, index 3 + { + Attachment: &ec2.InstanceNetworkInterfaceAttachment{ + DeviceIndex: int64ptr(3), + }, + Ipv6Addresses: []*ec2.InstanceIpv6Address{ + { + Ipv6Address: strptr("2001:db8:2::3:1"), + IsPrimaryIpv6: boolptr(true), + }, + }, + SubnetId: strptr("azid-1"), + }, + // interface without primary IPv6, index 0 + { + Attachment: &ec2.InstanceNetworkInterfaceAttachment{ + DeviceIndex: int64ptr(0), + }, + Ipv6Addresses: []*ec2.InstanceIpv6Address{}, + SubnetId: strptr("azid-3"), + }, + }, + }, + }, + }, + expected: []*targetgroup.Group{ + { + Source: "region-ipv6", + Targets: []model.LabelSet{ + { + "__address__": model.LabelValue("9.10.11.12:4242"), + "__meta_ec2_ami": model.LabelValue("ami-ipv6"), + "__meta_ec2_availability_zone": model.LabelValue("azname-b"), + "__meta_ec2_availability_zone_id": model.LabelValue("azid-2"), + "__meta_ec2_instance_id": model.LabelValue("instance-id-ipv6"), + "__meta_ec2_instance_state": model.LabelValue("running"), + "__meta_ec2_instance_type": model.LabelValue("instance-type-ipv6"), + "__meta_ec2_ipv6_addresses": model.LabelValue(",2001:db8:2::1:1,2001:db8:2::2:1,2001:db8:2::2:2,2001:db8:2::3:1,"), + "__meta_ec2_owner_id": model.LabelValue(""), + "__meta_ec2_primary_ipv6_addresses": model.LabelValue(",,2001:db8:2::2:2,,2001:db8:2::3:1,"), + "__meta_ec2_primary_subnet_id": model.LabelValue("azid-2"), + "__meta_ec2_private_ip": model.LabelValue("9.10.11.12"), + "__meta_ec2_region": model.LabelValue("region-ipv6"), + "__meta_ec2_subnet_id": model.LabelValue(",azid-2,azid-1,azid-3,"), + "__meta_ec2_vpc_id": model.LabelValue("vpc-ipv6"), + }, + }, + }, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + client := newMockEC2Client(tt.ec2Data) + + d := &EC2Discovery{ + ec2: client, + cfg: &EC2SDConfig{ + Port: 4242, + Region: client.ec2Data.region, + }, + } + + g, err := d.refresh(ctx) + require.NoError(t, err) + require.Equal(t, tt.expected, g) + }) + } +} + +// EC2 client mock. 
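The mock defined next compiles against the discoverer only because ec2.go now stores an ec2iface.EC2API rather than the concrete *ec2.EC2. Production behaviour is unchanged, since the real SDK client already satisfies that interface; a small sketch (the region value is illustrative, not from the patch):

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
)

// newRealEC2API shows that the concrete client still plugs into the
// interface-typed field: *ec2.EC2 implements ec2iface.EC2API.
func newRealEC2API() (ec2iface.EC2API, error) {
	sess, err := session.NewSession(&aws.Config{Region: aws.String("eu-west-2")})
	if err != nil {
		return nil, err
	}
	var api ec2iface.EC2API = ec2.New(sess)
	return api, nil
}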
+type mockEC2Client struct { + ec2iface.EC2API + ec2Data ec2DataStore +} + +func newMockEC2Client(ec2Data *ec2DataStore) *mockEC2Client { + client := mockEC2Client{ + ec2Data: *ec2Data, + } + return &client +} + +func (m *mockEC2Client) DescribeAvailabilityZonesWithContext(ctx aws.Context, input *ec2.DescribeAvailabilityZonesInput, opts ...request.Option) (*ec2.DescribeAvailabilityZonesOutput, error) { + if len(m.ec2Data.azToAZID) == 0 { + return nil, errors.New("No AZs found") + } + + azs := make([]*ec2.AvailabilityZone, len(m.ec2Data.azToAZID)) + + i := 0 + for k, v := range m.ec2Data.azToAZID { + azs[i] = &ec2.AvailabilityZone{ + ZoneName: strptr(k), + ZoneId: strptr(v), + } + i++ + } + + return &ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: azs, + }, nil +} + +func (m *mockEC2Client) DescribeInstancesPagesWithContext(ctx aws.Context, input *ec2.DescribeInstancesInput, fn func(*ec2.DescribeInstancesOutput, bool) bool, opts ...request.Option) error { + r := ec2.Reservation{} + r.SetInstances(m.ec2Data.instances) + r.SetOwnerId(m.ec2Data.ownerID) + + o := ec2.DescribeInstancesOutput{} + o.SetReservations([]*ec2.Reservation{&r}) + + _ = fn(&o, true) + + return nil +} diff --git a/discovery/aws/lightsail.go b/discovery/aws/lightsail.go index 0ad7f2d5416..0b046be6d9a 100644 --- a/discovery/aws/lightsail.go +++ b/discovery/aws/lightsail.go @@ -17,6 +17,7 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" "strings" @@ -29,10 +30,10 @@ import ( "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/lightsail" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" @@ -130,14 +131,14 @@ type LightsailDiscovery struct { } // NewLightsailDiscovery returns a new LightsailDiscovery which periodically refreshes its targets. 
-func NewLightsailDiscovery(conf *LightsailSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) { +func NewLightsailDiscovery(conf *LightsailSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) { m, ok := metrics.(*lightsailMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } d := &LightsailDiscovery{ diff --git a/discovery/azure/azure.go b/discovery/azure/azure.go index 70d95b9f3a0..35bbc3847c5 100644 --- a/discovery/azure/azure.go +++ b/discovery/azure/azure.go @@ -17,6 +17,7 @@ import ( "context" "errors" "fmt" + "log/slog" "math/rand" "net" "net/http" @@ -35,10 +36,9 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" cache "github.com/Code-Hex/go-generics-cache" "github.com/Code-Hex/go-generics-cache/policy/lru" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/model" "github.com/prometheus/common/version" @@ -175,7 +175,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { type Discovery struct { *refresh.Discovery - logger log.Logger + logger *slog.Logger cfg *SDConfig port int cache *cache.Cache[string, *armnetwork.Interface] @@ -183,14 +183,14 @@ type Discovery struct { } // NewDiscovery returns a new AzureDiscovery which periodically refreshes its targets. -func NewDiscovery(cfg *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(cfg *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*azureMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } l := cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5000))) d := &Discovery{ @@ -228,26 +228,26 @@ type azureClient struct { vm *armcompute.VirtualMachinesClient vmss *armcompute.VirtualMachineScaleSetsClient vmssvm *armcompute.VirtualMachineScaleSetVMsClient - logger log.Logger + logger *slog.Logger } var _ client = &azureClient{} -// createAzureClient is a helper function for creating an Azure compute client to ARM. -func createAzureClient(cfg SDConfig, logger log.Logger) (client, error) { - cloudConfiguration, err := CloudConfigurationFromName(cfg.Environment) +// createAzureClient is a helper method for creating an Azure compute client to ARM. 
+func (d *Discovery) createAzureClient() (client, error) { + cloudConfiguration, err := CloudConfigurationFromName(d.cfg.Environment) if err != nil { return &azureClient{}, err } var c azureClient - c.logger = logger + c.logger = d.logger telemetry := policy.TelemetryOptions{ ApplicationID: userAgent, } - credential, err := newCredential(cfg, policy.ClientOptions{ + credential, err := newCredential(*d.cfg, policy.ClientOptions{ Cloud: cloudConfiguration, Telemetry: telemetry, }) @@ -255,7 +255,7 @@ func createAzureClient(cfg SDConfig, logger log.Logger) (client, error) { return &azureClient{}, err } - client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "azure_sd") + client, err := config_util.NewClientFromConfig(d.cfg.HTTPClientConfig, "azure_sd") if err != nil { return &azureClient{}, err } @@ -267,22 +267,22 @@ func createAzureClient(cfg SDConfig, logger log.Logger) (client, error) { }, } - c.vm, err = armcompute.NewVirtualMachinesClient(cfg.SubscriptionID, credential, options) + c.vm, err = armcompute.NewVirtualMachinesClient(d.cfg.SubscriptionID, credential, options) if err != nil { return &azureClient{}, err } - c.nic, err = armnetwork.NewInterfacesClient(cfg.SubscriptionID, credential, options) + c.nic, err = armnetwork.NewInterfacesClient(d.cfg.SubscriptionID, credential, options) if err != nil { return &azureClient{}, err } - c.vmss, err = armcompute.NewVirtualMachineScaleSetsClient(cfg.SubscriptionID, credential, options) + c.vmss, err = armcompute.NewVirtualMachineScaleSetsClient(d.cfg.SubscriptionID, credential, options) if err != nil { return &azureClient{}, err } - c.vmssvm, err = armcompute.NewVirtualMachineScaleSetVMsClient(cfg.SubscriptionID, credential, options) + c.vmssvm, err = armcompute.NewVirtualMachineScaleSetVMsClient(d.cfg.SubscriptionID, credential, options) if err != nil { return &azureClient{}, err } @@ -337,35 +337,27 @@ type virtualMachine struct { } // Create a new azureResource object from an ID string. -func newAzureResourceFromID(id string, logger log.Logger) (*arm.ResourceID, error) { +func newAzureResourceFromID(id string, logger *slog.Logger) (*arm.ResourceID, error) { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } resourceID, err := arm.ParseResourceID(id) if err != nil { err := fmt.Errorf("invalid ID '%s': %w", id, err) - level.Error(logger).Log("err", err) + logger.Error("Failed to parse resource ID", "err", err) return &arm.ResourceID{}, err } return resourceID, nil } -func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { - defer level.Debug(d.logger).Log("msg", "Azure discovery completed") - - client, err := createAzureClient(*d.cfg, d.logger) - if err != nil { - d.metrics.failuresCount.Inc() - return nil, fmt.Errorf("could not create Azure client: %w", err) - } - +func (d *Discovery) refreshAzureClient(ctx context.Context, client client) ([]*targetgroup.Group, error) { machines, err := client.getVMs(ctx, d.cfg.ResourceGroup) if err != nil { d.metrics.failuresCount.Inc() return nil, fmt.Errorf("could not get virtual machines: %w", err) } - level.Debug(d.logger).Log("msg", "Found virtual machines during Azure discovery.", "count", len(machines)) + d.logger.Debug("Found virtual machines during Azure discovery.", "count", len(machines)) // Load the vms managed by scale sets. 
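Splitting refresh into createAzureClient and refreshAzureClient is what allows the new TestAzureRefresh further below to exercise discovery against the Azure SDK's in-memory fakes instead of real ARM endpoints. A condensed sketch of that wiring for a single client type, mirroring what the test repeats for VMs, scale sets, scale-set VMs, and network interfaces:

package example

import (
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
	fake "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake"
)

// newFakeVMClient attaches an in-memory fake server as the client's transport,
// so listing virtual machines never leaves the process.
func newFakeVMClient(pages []armcompute.VirtualMachinesClientListAllResponse) (*armcompute.VirtualMachinesClient, error) {
	srv := fake.VirtualMachinesServer{
		NewListAllPager: func(_ *armcompute.VirtualMachinesClientListAllOptions) (resp azfake.PagerResponder[armcompute.VirtualMachinesClientListAllResponse]) {
			for _, page := range pages {
				resp.AddPage(http.StatusOK, page, nil)
			}
			return
		},
	}
	return armcompute.NewVirtualMachinesClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transport: fake.NewVirtualMachinesServerTransport(&srv)},
	})
}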
scaleSets, err := client.getScaleSets(ctx, d.cfg.ResourceGroup) @@ -418,6 +410,18 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { return []*targetgroup.Group{&tg}, nil } +func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { + defer d.logger.Debug("Azure discovery completed") + + client, err := d.createAzureClient() + if err != nil { + d.metrics.failuresCount.Inc() + return nil, fmt.Errorf("could not create Azure client: %w", err) + } + + return d.refreshAzureClient(ctx, client) +} + func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualMachine) (model.LabelSet, error) { r, err := newAzureResourceFromID(vm.ID, d.logger) if err != nil { @@ -459,7 +463,7 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM } if err != nil { if errors.Is(err, errorNotFound) { - level.Warn(d.logger).Log("msg", "Network interface does not exist", "name", nicID, "err", err) + d.logger.Warn("Network interface does not exist", "name", nicID, "err", err) } else { return nil, err } @@ -480,7 +484,7 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM // yet support this. On deallocated machines, this value happens to be nil so it // is a cheap and easy way to determine if a machine is allocated or not. if networkInterface.Properties.Primary == nil { - level.Debug(d.logger).Log("msg", "Skipping deallocated virtual machine", "machine", vm.Name) + d.logger.Debug("Skipping deallocated virtual machine", "machine", vm.Name) return nil, nil } @@ -724,7 +728,7 @@ func (d *Discovery) addToCache(nicID string, netInt *armnetwork.Interface) { rs := time.Duration(random) * time.Second exptime := time.Duration(d.cfg.RefreshInterval*10) + rs d.cache.Set(nicID, netInt, cache.WithExpiration(exptime)) - level.Debug(d.logger).Log("msg", "Adding nic", "nic", nicID, "time", exptime.Seconds()) + d.logger.Debug("Adding nic", "nic", nicID, "time", exptime.Seconds()) } // getFromCache will get the network Interface for the specified nicID diff --git a/discovery/azure/azure_test.go b/discovery/azure/azure_test.go index 32dab66c8c1..b905e9fcefa 100644 --- a/discovery/azure/azure_test.go +++ b/discovery/azure/azure_test.go @@ -15,19 +15,34 @@ package azure import ( "context" - "fmt" + "log/slog" + "net/http" + "slices" + "strings" "testing" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + fake "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + fakenetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake" cache "github.com/Code-Hex/go-generics-cache" "github.com/Code-Hex/go-generics-cache/policy/lru" - "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/goleak" + + "github.com/prometheus/prometheus/discovery" + "github.com/prometheus/prometheus/discovery/targetgroup" ) +const defaultMockNetworkID string = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}" + func TestMain(m 
*testing.M) { goleak.VerifyTestMain(m, goleak.IgnoreTopFunction("github.com/Code-Hex/go-generics-cache.(*janitor).run.func1"), @@ -96,13 +111,12 @@ func TestVMToLabelSet(t *testing.T) { vmType := "type" location := "westeurope" computerName := "computer_name" - networkID := "/subscriptions/00000000-0000-0000-0000-000000000000/network1" ipAddress := "10.20.30.40" primary := true networkProfile := armcompute.NetworkProfile{ NetworkInterfaces: []*armcompute.NetworkInterfaceReference{ { - ID: &networkID, + ID: to.Ptr(defaultMockNetworkID), Properties: &armcompute.NetworkInterfaceReferenceProperties{Primary: &primary}, }, }, @@ -139,7 +153,7 @@ func TestVMToLabelSet(t *testing.T) { Location: location, OsType: "Linux", Tags: map[string]*string{}, - NetworkInterfaces: []string{networkID}, + NetworkInterfaces: []string{defaultMockNetworkID}, Size: size, } @@ -150,11 +164,12 @@ func TestVMToLabelSet(t *testing.T) { cfg := DefaultSDConfig d := &Discovery{ cfg: &cfg, - logger: log.NewNopLogger(), + logger: promslog.NewNopLogger(), cache: cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5))), } network := armnetwork.Interface{ - Name: &networkID, + Name: to.Ptr(defaultMockNetworkID), + ID: to.Ptr(defaultMockNetworkID), Properties: &armnetwork.InterfacePropertiesFormat{ Primary: &primary, IPConfigurations: []*armnetwork.InterfaceIPConfiguration{ @@ -164,9 +179,9 @@ func TestVMToLabelSet(t *testing.T) { }, }, } - client := &mockAzureClient{ - networkInterface: &network, - } + + client := createMockAzureClient(t, nil, nil, nil, network, nil) + labelSet, err := d.vmToLabelSet(context.Background(), client, actualVM) require.NoError(t, err) require.Len(t, labelSet, 11) @@ -475,34 +490,372 @@ func TestNewAzureResourceFromID(t *testing.T) { } } +func TestAzureRefresh(t *testing.T) { + tests := []struct { + scenario string + vmResp []armcompute.VirtualMachinesClientListAllResponse + vmssResp []armcompute.VirtualMachineScaleSetsClientListAllResponse + vmssvmResp []armcompute.VirtualMachineScaleSetVMsClientListResponse + interfacesResp armnetwork.Interface + expectedTG []*targetgroup.Group + }{ + { + scenario: "VMs, VMSS and VMSSVMs in Multiple Responses", + vmResp: []armcompute.VirtualMachinesClientListAllResponse{ + { + VirtualMachineListResult: armcompute.VirtualMachineListResult{ + Value: []*armcompute.VirtualMachine{ + defaultVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm1"), to.Ptr("vm1")), + defaultVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm2"), to.Ptr("vm2")), + }, + }, + }, + { + VirtualMachineListResult: armcompute.VirtualMachineListResult{ + Value: []*armcompute.VirtualMachine{ + defaultVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm3"), to.Ptr("vm3")), + defaultVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm4"), to.Ptr("vm4")), + }, + }, + }, + }, + vmssResp: []armcompute.VirtualMachineScaleSetsClientListAllResponse{ + { + VirtualMachineScaleSetListWithLinkResult: armcompute.VirtualMachineScaleSetListWithLinkResult{ + Value: []*armcompute.VirtualMachineScaleSet{ + { + ID: 
to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1"), + Name: to.Ptr("vmScaleSet1"), + Location: to.Ptr("australiaeast"), + Type: to.Ptr("Microsoft.Compute/virtualMachineScaleSets"), + }, + }, + }, + }, + }, + vmssvmResp: []armcompute.VirtualMachineScaleSetVMsClientListResponse{ + { + VirtualMachineScaleSetVMListResult: armcompute.VirtualMachineScaleSetVMListResult{ + Value: []*armcompute.VirtualMachineScaleSetVM{ + defaultVMSSVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1/virtualMachines/vmScaleSet1_vm1"), to.Ptr("vmScaleSet1_vm1")), + defaultVMSSVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1/virtualMachines/vmScaleSet1_vm2"), to.Ptr("vmScaleSet1_vm2")), + }, + }, + }, + }, + interfacesResp: armnetwork.Interface{ + ID: to.Ptr(defaultMockNetworkID), + Properties: &armnetwork.InterfacePropertiesFormat{ + Primary: to.Ptr(true), + IPConfigurations: []*armnetwork.InterfaceIPConfiguration{ + {Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{ + PrivateIPAddress: to.Ptr("10.0.0.1"), + }}, + }, + }, + }, + expectedTG: []*targetgroup.Group{ + { + Targets: []model.LabelSet{ + { + "__address__": "10.0.0.1:80", + "__meta_azure_machine_computer_name": "computer_name", + "__meta_azure_machine_id": "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm1", + "__meta_azure_machine_location": "australiaeast", + "__meta_azure_machine_name": "vm1", + "__meta_azure_machine_os_type": "Linux", + "__meta_azure_machine_private_ip": "10.0.0.1", + "__meta_azure_machine_resource_group": "{resourceGroup}", + "__meta_azure_machine_size": "size", + "__meta_azure_machine_tag_prometheus": "", + "__meta_azure_subscription_id": "", + "__meta_azure_tenant_id": "", + }, + { + "__address__": "10.0.0.1:80", + "__meta_azure_machine_computer_name": "computer_name", + "__meta_azure_machine_id": "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm2", + "__meta_azure_machine_location": "australiaeast", + "__meta_azure_machine_name": "vm2", + "__meta_azure_machine_os_type": "Linux", + "__meta_azure_machine_private_ip": "10.0.0.1", + "__meta_azure_machine_resource_group": "{resourceGroup}", + "__meta_azure_machine_size": "size", + "__meta_azure_machine_tag_prometheus": "", + "__meta_azure_subscription_id": "", + "__meta_azure_tenant_id": "", + }, + { + "__address__": "10.0.0.1:80", + "__meta_azure_machine_computer_name": "computer_name", + "__meta_azure_machine_id": "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm3", + "__meta_azure_machine_location": "australiaeast", + "__meta_azure_machine_name": "vm3", + "__meta_azure_machine_os_type": "Linux", + "__meta_azure_machine_private_ip": "10.0.0.1", + "__meta_azure_machine_resource_group": "{resourceGroup}", + "__meta_azure_machine_size": "size", + "__meta_azure_machine_tag_prometheus": "", + "__meta_azure_subscription_id": "", + "__meta_azure_tenant_id": "", + }, + { + "__address__": "10.0.0.1:80", + "__meta_azure_machine_computer_name": "computer_name", + "__meta_azure_machine_id": 
"/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm4", + "__meta_azure_machine_location": "australiaeast", + "__meta_azure_machine_name": "vm4", + "__meta_azure_machine_os_type": "Linux", + "__meta_azure_machine_private_ip": "10.0.0.1", + "__meta_azure_machine_resource_group": "{resourceGroup}", + "__meta_azure_machine_size": "size", + "__meta_azure_machine_tag_prometheus": "", + "__meta_azure_subscription_id": "", + "__meta_azure_tenant_id": "", + }, + { + "__address__": "10.0.0.1:80", + "__meta_azure_machine_computer_name": "computer_name", + "__meta_azure_machine_id": "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1/virtualMachines/vmScaleSet1_vm1", + "__meta_azure_machine_location": "australiaeast", + "__meta_azure_machine_name": "vmScaleSet1_vm1", + "__meta_azure_machine_os_type": "Linux", + "__meta_azure_machine_private_ip": "10.0.0.1", + "__meta_azure_machine_resource_group": "{resourceGroup}", + "__meta_azure_machine_scale_set": "vmScaleSet1", + "__meta_azure_machine_size": "size", + "__meta_azure_machine_tag_prometheus": "", + "__meta_azure_subscription_id": "", + "__meta_azure_tenant_id": "", + }, + { + "__address__": "10.0.0.1:80", + "__meta_azure_machine_computer_name": "computer_name", + "__meta_azure_machine_id": "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1/virtualMachines/vmScaleSet1_vm2", + "__meta_azure_machine_location": "australiaeast", + "__meta_azure_machine_name": "vmScaleSet1_vm2", + "__meta_azure_machine_os_type": "Linux", + "__meta_azure_machine_private_ip": "10.0.0.1", + "__meta_azure_machine_resource_group": "{resourceGroup}", + "__meta_azure_machine_scale_set": "vmScaleSet1", + "__meta_azure_machine_size": "size", + "__meta_azure_machine_tag_prometheus": "", + "__meta_azure_subscription_id": "", + "__meta_azure_tenant_id": "", + }, + }, + }, + }, + }, + } + for _, tc := range tests { + t.Run(tc.scenario, func(t *testing.T) { + t.Parallel() + azureSDConfig := &DefaultSDConfig + + azureClient := createMockAzureClient(t, tc.vmResp, tc.vmssResp, tc.vmssvmResp, tc.interfacesResp, nil) + + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := azureSDConfig.NewDiscovererMetrics(reg, refreshMetrics) + + sd, err := NewDiscovery(azureSDConfig, nil, metrics) + require.NoError(t, err) + + tg, err := sd.refreshAzureClient(context.Background(), azureClient) + require.NoError(t, err) + + sortTargetsByID(tg[0].Targets) + require.Equal(t, tc.expectedTG, tg) + }) + } +} + type mockAzureClient struct { - networkInterface *armnetwork.Interface + azureClient } -var _ client = &mockAzureClient{} +func createMockAzureClient(t *testing.T, vmResp []armcompute.VirtualMachinesClientListAllResponse, vmssResp []armcompute.VirtualMachineScaleSetsClientListAllResponse, vmssvmResp []armcompute.VirtualMachineScaleSetVMsClientListResponse, interfaceResp armnetwork.Interface, logger *slog.Logger) client { + t.Helper() + mockVMServer := defaultMockVMServer(vmResp) + mockVMSSServer := defaultMockVMSSServer(vmssResp) + mockVMScaleSetVMServer := defaultMockVMSSVMServer(vmssvmResp) + mockInterfaceServer := defaultMockInterfaceServer(interfaceResp) + + vmClient, err := armcompute.NewVirtualMachinesClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{ + 
ClientOptions: azcore.ClientOptions{ + Transport: fake.NewVirtualMachinesServerTransport(&mockVMServer), + }, + }) + require.NoError(t, err) + + vmssClient, err := armcompute.NewVirtualMachineScaleSetsClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{ + ClientOptions: azcore.ClientOptions{ + Transport: fake.NewVirtualMachineScaleSetsServerTransport(&mockVMSSServer), + }, + }) + require.NoError(t, err) + + vmssvmClient, err := armcompute.NewVirtualMachineScaleSetVMsClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{ + ClientOptions: azcore.ClientOptions{ + Transport: fake.NewVirtualMachineScaleSetVMsServerTransport(&mockVMScaleSetVMServer), + }, + }) + require.NoError(t, err) -func (*mockAzureClient) getVMs(ctx context.Context, resourceGroup string) ([]virtualMachine, error) { - return nil, nil + interfacesClient, err := armnetwork.NewInterfacesClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{ + ClientOptions: azcore.ClientOptions{ + Transport: fakenetwork.NewInterfacesServerTransport(&mockInterfaceServer), + }, + }) + require.NoError(t, err) + + return &mockAzureClient{ + azureClient: azureClient{ + vm: vmClient, + vmss: vmssClient, + vmssvm: vmssvmClient, + nic: interfacesClient, + logger: logger, + }, + } } -func (*mockAzureClient) getScaleSets(ctx context.Context, resourceGroup string) ([]armcompute.VirtualMachineScaleSet, error) { - return nil, nil +func defaultMockInterfaceServer(interfaceResp armnetwork.Interface) fakenetwork.InterfacesServer { + return fakenetwork.InterfacesServer{ + Get: func(ctx context.Context, resourceGroupName, networkInterfaceName string, options *armnetwork.InterfacesClientGetOptions) (resp azfake.Responder[armnetwork.InterfacesClientGetResponse], errResp azfake.ErrorResponder) { + resp.SetResponse(http.StatusOK, armnetwork.InterfacesClientGetResponse{Interface: interfaceResp}, nil) + return + }, + GetVirtualMachineScaleSetNetworkInterface: func(ctx context.Context, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName string, options *armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceOptions) (resp azfake.Responder[armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceResponse], errResp azfake.ErrorResponder) { + resp.SetResponse(http.StatusOK, armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceResponse{Interface: interfaceResp}, nil) + return + }, + } +} + +func defaultMockVMServer(vmResp []armcompute.VirtualMachinesClientListAllResponse) fake.VirtualMachinesServer { + return fake.VirtualMachinesServer{ + NewListAllPager: func(options *armcompute.VirtualMachinesClientListAllOptions) (resp azfake.PagerResponder[armcompute.VirtualMachinesClientListAllResponse]) { + for _, page := range vmResp { + resp.AddPage(http.StatusOK, page, nil) + } + return + }, + } +} + +func defaultMockVMSSServer(vmssResp []armcompute.VirtualMachineScaleSetsClientListAllResponse) fake.VirtualMachineScaleSetsServer { + return fake.VirtualMachineScaleSetsServer{ + NewListAllPager: func(options *armcompute.VirtualMachineScaleSetsClientListAllOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetsClientListAllResponse]) { + for _, page := range vmssResp { + resp.AddPage(http.StatusOK, page, nil) + } + return + }, + } } -func (*mockAzureClient) getScaleSetVMs(ctx context.Context, scaleSet armcompute.VirtualMachineScaleSet) ([]virtualMachine, error) { - return nil, nil +func defaultMockVMSSVMServer(vmssvmResp 
[]armcompute.VirtualMachineScaleSetVMsClientListResponse) fake.VirtualMachineScaleSetVMsServer { + return fake.VirtualMachineScaleSetVMsServer{ + NewListPager: func(resourceGroupName, virtualMachineScaleSetName string, options *armcompute.VirtualMachineScaleSetVMsClientListOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetVMsClientListResponse]) { + for _, page := range vmssvmResp { + resp.AddPage(http.StatusOK, page, nil) + } + return + }, + } } -func (m *mockAzureClient) getVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*armnetwork.Interface, error) { - if networkInterfaceID == "" { - return nil, fmt.Errorf("parameter networkInterfaceID cannot be empty") +func defaultVMWithIDAndName(id, name *string) *armcompute.VirtualMachine { + vmSize := armcompute.VirtualMachineSizeTypes("size") + osType := armcompute.OperatingSystemTypesLinux + defaultID := "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/testVM" + defaultName := "testVM" + + if id == nil { + id = &defaultID + } + if name == nil { + name = &defaultName + } + + return &armcompute.VirtualMachine{ + ID: id, + Name: name, + Type: to.Ptr("Microsoft.Compute/virtualMachines"), + Location: to.Ptr("australiaeast"), + Properties: &armcompute.VirtualMachineProperties{ + OSProfile: &armcompute.OSProfile{ + ComputerName: to.Ptr("computer_name"), + }, + StorageProfile: &armcompute.StorageProfile{ + OSDisk: &armcompute.OSDisk{ + OSType: &osType, + }, + }, + NetworkProfile: &armcompute.NetworkProfile{ + NetworkInterfaces: []*armcompute.NetworkInterfaceReference{ + { + ID: to.Ptr(defaultMockNetworkID), + }, + }, + }, + HardwareProfile: &armcompute.HardwareProfile{ + VMSize: &vmSize, + }, + }, + Tags: map[string]*string{ + "prometheus": new(string), + }, } - return m.networkInterface, nil } -func (m *mockAzureClient) getVMScaleSetVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID, scaleSetName, instanceID string) (*armnetwork.Interface, error) { - if scaleSetName == "" { - return nil, fmt.Errorf("parameter virtualMachineScaleSetName cannot be empty") +func defaultVMSSVMWithIDAndName(id, name *string) *armcompute.VirtualMachineScaleSetVM { + vmSize := armcompute.VirtualMachineSizeTypes("size") + osType := armcompute.OperatingSystemTypesLinux + defaultID := "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/testVMScaleSet/virtualMachines/testVM" + defaultName := "testVM" + + if id == nil { + id = &defaultID + } + if name == nil { + name = &defaultName + } + + return &armcompute.VirtualMachineScaleSetVM{ + ID: id, + Name: name, + Type: to.Ptr("Microsoft.Compute/virtualMachines"), + InstanceID: to.Ptr("123"), + Location: to.Ptr("australiaeast"), + Properties: &armcompute.VirtualMachineScaleSetVMProperties{ + OSProfile: &armcompute.OSProfile{ + ComputerName: to.Ptr("computer_name"), + }, + StorageProfile: &armcompute.StorageProfile{ + OSDisk: &armcompute.OSDisk{ + OSType: &osType, + }, + }, + NetworkProfile: &armcompute.NetworkProfile{ + NetworkInterfaces: []*armcompute.NetworkInterfaceReference{ + {ID: to.Ptr(defaultMockNetworkID)}, + }, + }, + HardwareProfile: &armcompute.HardwareProfile{ + VMSize: &vmSize, + }, + }, + Tags: map[string]*string{ + "prometheus": new(string), + }, } - return m.networkInterface, nil +} + +func sortTargetsByID(targets []model.LabelSet) { + slices.SortFunc(targets, func(a, b model.LabelSet) int { + return 
strings.Compare(string(a["__meta_azure_machine_id"]), string(b["__meta_azure_machine_id"])) + }) } diff --git a/discovery/consul/consul.go b/discovery/consul/consul.go index bdc1fc8dce4..fcae7b186f7 100644 --- a/discovery/consul/consul.go +++ b/discovery/consul/consul.go @@ -17,17 +17,17 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" "strings" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" consul "github.com/hashicorp/consul/api" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -113,8 +113,11 @@ type SDConfig struct { Services []string `yaml:"services,omitempty"` // A list of tags used to filter instances inside a service. Services must contain all tags in the list. ServiceTags []string `yaml:"tags,omitempty"` - // Desired node metadata. + // Desired node metadata. As of Consul 1.14, consider `filter` instead. NodeMeta map[string]string `yaml:"node_meta,omitempty"` + // Consul filter string + // See https://www.consul.io/api-docs/catalog#filtering-1, for syntax + Filter string `yaml:"filter,omitempty"` HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` } @@ -174,22 +177,23 @@ type Discovery struct { watchedServices []string // Set of services which will be discovered. watchedTags []string // Tags used to filter instances of a service. watchedNodeMeta map[string]string + watchedFilter string allowStale bool refreshInterval time.Duration finalizer func() - logger log.Logger + logger *slog.Logger metrics *consulMetrics } // NewDiscovery returns a new Discovery for the given config. -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*consulMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } wrapper, err := config.NewClientFromConfig(conf.HTTPClientConfig, "consul_sd", config.WithIdleConnTimeout(2*watchTimeout)) @@ -218,6 +222,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.Discovere watchedServices: conf.Services, watchedTags: conf.ServiceTags, watchedNodeMeta: conf.NodeMeta, + watchedFilter: conf.Filter, allowStale: conf.AllowStale, refreshInterval: time.Duration(conf.RefreshInterval), clientDatacenter: conf.Datacenter, @@ -282,7 +287,7 @@ func (d *Discovery) getDatacenter() error { info, err := d.client.Agent().Self() if err != nil { - level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err) + d.logger.Error("Error retrieving datacenter name", "err", err) d.metrics.rpcFailuresCount.Inc() return err } @@ -290,12 +295,12 @@ func (d *Discovery) getDatacenter() error { dc, ok := info["Config"]["Datacenter"].(string) if !ok { err := fmt.Errorf("invalid value '%v' for Config.Datacenter", info["Config"]["Datacenter"]) - level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err) + d.logger.Error("Error retrieving datacenter name", "err", err) return err } d.clientDatacenter = dc - d.logger = log.With(d.logger, "datacenter", dc) + d.logger = d.logger.With("datacenter", dc) return nil } @@ -361,13 +366,14 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- 
[]*targetgroup.Group) { // entire list of services. func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.Group, lastIndex *uint64, services map[string]func()) { catalog := d.client.Catalog() - level.Debug(d.logger).Log("msg", "Watching services", "tags", strings.Join(d.watchedTags, ",")) + d.logger.Debug("Watching services", "tags", strings.Join(d.watchedTags, ","), "filter", d.watchedFilter) opts := &consul.QueryOptions{ WaitIndex: *lastIndex, WaitTime: watchTimeout, AllowStale: d.allowStale, NodeMeta: d.watchedNodeMeta, + Filter: d.watchedFilter, } t0 := time.Now() srvs, meta, err := catalog.Services(opts.WithContext(ctx)) @@ -382,7 +388,7 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup. } if err != nil { - level.Error(d.logger).Log("msg", "Error refreshing service list", "err", err) + d.logger.Error("Error refreshing service list", "err", err) d.metrics.rpcFailuresCount.Inc() time.Sleep(retryInterval) return @@ -445,7 +451,7 @@ type consulService struct { discovery *Discovery client *consul.Client tagSeparator string - logger log.Logger + logger *slog.Logger rpcFailuresCount prometheus.Counter serviceRPCDuration prometheus.Observer } @@ -490,7 +496,7 @@ func (d *Discovery) watchService(ctx context.Context, ch chan<- []*targetgroup.G // Get updates for a service. func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Group, health *consul.Health, lastIndex *uint64) { - level.Debug(srv.logger).Log("msg", "Watching service", "service", srv.name, "tags", strings.Join(srv.tags, ",")) + srv.logger.Debug("Watching service", "service", srv.name, "tags", strings.Join(srv.tags, ",")) opts := &consul.QueryOptions{ WaitIndex: *lastIndex, @@ -513,7 +519,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr } if err != nil { - level.Error(srv.logger).Log("msg", "Error refreshing service", "service", srv.name, "tags", strings.Join(srv.tags, ","), "err", err) + srv.logger.Error("Error refreshing service", "service", srv.name, "tags", strings.Join(srv.tags, ","), "err", err) srv.rpcFailuresCount.Inc() time.Sleep(retryInterval) return diff --git a/discovery/consul/consul_test.go b/discovery/consul/consul_test.go index e3bc7938f55..cdbb80baba1 100644 --- a/discovery/consul/consul_test.go +++ b/discovery/consul/consul_test.go @@ -21,10 +21,10 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/goleak" "gopkg.in/yaml.v2" @@ -252,6 +252,8 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) { case "/v1/catalog/services?index=1&wait=120000ms": time.Sleep(5 * time.Second) response = ServicesTestAnswer + case "/v1/catalog/services?filter=NodeMeta.rack_name+%3D%3D+%222304%22&index=1&wait=120000ms": + response = ServicesTestAnswer default: t.Errorf("Unhandled consul call: %s", r.URL) } @@ -270,7 +272,7 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) { } func newDiscovery(t *testing.T, config *SDConfig) *Discovery { - logger := log.NewNopLogger() + logger := promslog.NewNopLogger() metrics := NewTestMetrics(t, config, prometheus.NewRegistry()) @@ -369,6 +371,27 @@ func TestAllOptions(t *testing.T) { <-ch } +// Watch the test service with a specific tag and node-meta via Filter parameter. 
+func TestFilterOption(t *testing.T) { + stub, config := newServer(t) + defer stub.Close() + + config.Services = []string{"test"} + config.Filter = `NodeMeta.rack_name == "2304"` + config.Token = "fake-token" + + d := newDiscovery(t, config) + + ctx, cancel := context.WithCancel(context.Background()) + ch := make(chan []*targetgroup.Group) + go func() { + d.Run(ctx, ch) + close(ch) + }() + checkOneTarget(t, <-ch) + cancel() +} + func TestGetDatacenterShouldReturnError(t *testing.T) { for _, tc := range []struct { handler func(http.ResponseWriter, *http.Request) @@ -407,7 +430,7 @@ func TestGetDatacenterShouldReturnError(t *testing.T) { err = d.getDatacenter() // An error should be returned. - require.Equal(t, tc.errMessage, err.Error()) + require.EqualError(t, err, tc.errMessage) // Should still be empty. require.Equal(t, "", d.clientDatacenter) } diff --git a/discovery/digitalocean/digitalocean.go b/discovery/digitalocean/digitalocean.go index ecee60cb1f0..52f3a9c57a6 100644 --- a/discovery/digitalocean/digitalocean.go +++ b/discovery/digitalocean/digitalocean.go @@ -16,6 +16,7 @@ package digitalocean import ( "context" "fmt" + "log/slog" "net" "net/http" "strconv" @@ -23,7 +24,6 @@ import ( "time" "github.com/digitalocean/godo" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -111,7 +111,7 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*digitaloceanMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/digitalocean/digitalocean_test.go b/discovery/digitalocean/digitalocean_test.go index 841b5ef977a..a282225ac27 100644 --- a/discovery/digitalocean/digitalocean_test.go +++ b/discovery/digitalocean/digitalocean_test.go @@ -19,9 +19,9 @@ import ( "net/url" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" @@ -57,7 +57,7 @@ func TestDigitalOceanSDRefresh(t *testing.T) { defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) endpoint, err := url.Parse(sdmock.Mock.Endpoint()) require.NoError(t, err) diff --git a/discovery/discovery.go b/discovery/discovery.go index a91faf6c864..c400de3632f 100644 --- a/discovery/discovery.go +++ b/discovery/discovery.go @@ -15,9 +15,9 @@ package discovery import ( "context" + "log/slog" "reflect" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" @@ -47,7 +47,7 @@ type DiscovererMetrics interface { // DiscovererOptions provides options for a Discoverer. type DiscovererOptions struct { - Logger log.Logger + Logger *slog.Logger Metrics DiscovererMetrics @@ -109,7 +109,7 @@ func (c *Configs) SetDirectory(dir string) { // UnmarshalYAML implements yaml.Unmarshaler. 
func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error { - cfgTyp := getConfigType(configsType) + cfgTyp := reflect.StructOf(configFields) cfgPtr := reflect.New(cfgTyp) cfgVal := cfgPtr.Elem() @@ -124,7 +124,7 @@ func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error { // MarshalYAML implements yaml.Marshaler. func (c Configs) MarshalYAML() (interface{}, error) { - cfgTyp := getConfigType(configsType) + cfgTyp := reflect.StructOf(configFields) cfgPtr := reflect.New(cfgTyp) cfgVal := cfgPtr.Elem() diff --git a/util/testutil/logging.go b/discovery/discovery_test.go similarity index 57% rename from util/testutil/logging.go rename to discovery/discovery_test.go index db096ea2342..af327195f2e 100644 --- a/util/testutil/logging.go +++ b/discovery/discovery_test.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright 2024 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -11,25 +11,26 @@ // See the License for the specific language governing permissions and // limitations under the License. -package testutil +package discovery import ( "testing" - "github.com/go-kit/log" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" ) -type logger struct { - t *testing.T -} - -// NewLogger returns a gokit compatible Logger which calls t.Log. -func NewLogger(t *testing.T) log.Logger { - return logger{t: t} -} +func TestConfigsCustomUnMarshalMarshal(t *testing.T) { + input := `static_configs: +- targets: + - foo:1234 + - bar:4321 +` + cfg := &Configs{} + err := yaml.UnmarshalStrict([]byte(input), cfg) + require.NoError(t, err) -// Log implements log.Logger. -func (t logger) Log(keyvals ...interface{}) error { - t.t.Log(keyvals...) - return nil + output, err := yaml.Marshal(cfg) + require.NoError(t, err) + require.Equal(t, input, string(output)) } diff --git a/discovery/dns/dns.go b/discovery/dns/dns.go index 314c3d38cd5..5de7f648869 100644 --- a/discovery/dns/dns.go +++ b/discovery/dns/dns.go @@ -17,17 +17,17 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" "strings" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/miekg/dns" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" @@ -111,21 +111,21 @@ type Discovery struct { names []string port int qtype uint16 - logger log.Logger + logger *slog.Logger metrics *dnsMetrics - lookupFn func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) + lookupFn func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
-func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*dnsMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } qtype := dns.TypeSRV @@ -174,7 +174,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { for _, name := range d.names { go func(n string) { if err := d.refreshOne(ctx, n, ch); err != nil && !errors.Is(err, context.Canceled) { - level.Error(d.logger).Log("msg", "Error refreshing DNS targets", "err", err) + d.logger.Error("Error refreshing DNS targets", "err", err) } wg.Done() }(name) @@ -238,7 +238,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ // CNAME responses can occur with "Type: A" dns_sd_config requests. continue default: - level.Warn(d.logger).Log("msg", "Invalid record", "record", record) + d.logger.Warn("Invalid record", "record", record) continue } tg.Targets = append(tg.Targets, model.LabelSet{ @@ -288,7 +288,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ // error will be generic-looking, because trying to return all the errors // returned by the combination of all name permutations and servers is a // nightmare. -func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { +func lookupWithSearchPath(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { conf, err := dns.ClientConfigFromFile(resolvConf) if err != nil { return nil, fmt.Errorf("could not load resolv.conf: %w", err) @@ -337,14 +337,14 @@ func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Ms // A non-viable answer is "anything else", which encompasses both various // system-level problems (like network timeouts) and also // valid-but-unexpected DNS responses (SERVFAIL, REFUSED, etc). 
-func lookupFromAnyServer(name string, qtype uint16, conf *dns.ClientConfig, logger log.Logger) (*dns.Msg, error) { +func lookupFromAnyServer(name string, qtype uint16, conf *dns.ClientConfig, logger *slog.Logger) (*dns.Msg, error) { client := &dns.Client{} for _, server := range conf.Servers { servAddr := net.JoinHostPort(server, conf.Port) msg, err := askServerForName(name, qtype, client, servAddr, true) if err != nil { - level.Warn(logger).Log("msg", "DNS resolution failed", "server", server, "name", name, "err", err) + logger.Warn("DNS resolution failed", "server", server, "name", name, "err", err) continue } diff --git a/discovery/dns/dns_test.go b/discovery/dns/dns_test.go index 33a976827d3..96bb32491f3 100644 --- a/discovery/dns/dns_test.go +++ b/discovery/dns/dns_test.go @@ -16,11 +16,11 @@ package dns import ( "context" "fmt" + "log/slog" "net" "testing" "time" - "github.com/go-kit/log" "github.com/miekg/dns" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" @@ -40,7 +40,7 @@ func TestDNS(t *testing.T) { testCases := []struct { name string config SDConfig - lookup func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) + lookup func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) expected []*targetgroup.Group }{ @@ -52,7 +52,7 @@ func TestDNS(t *testing.T) { Port: 80, Type: "A", }, - lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { + lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { return nil, fmt.Errorf("some error") }, expected: []*targetgroup.Group{}, @@ -65,7 +65,7 @@ func TestDNS(t *testing.T) { Port: 80, Type: "A", }, - lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { + lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ &dns.A{A: net.IPv4(192, 0, 2, 2)}, @@ -97,7 +97,7 @@ func TestDNS(t *testing.T) { Port: 80, Type: "AAAA", }, - lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { + lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ &dns.AAAA{AAAA: net.IPv6loopback}, @@ -128,7 +128,7 @@ func TestDNS(t *testing.T) { Type: "SRV", RefreshInterval: model.Duration(time.Minute), }, - lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { + lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ &dns.SRV{Port: 3306, Target: "db1.example.com."}, @@ -167,7 +167,7 @@ func TestDNS(t *testing.T) { Names: []string{"_mysql._tcp.db.example.com."}, RefreshInterval: model.Duration(time.Minute), }, - lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { + lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ &dns.SRV{Port: 3306, Target: "db1.example.com."}, @@ -198,7 +198,7 @@ func TestDNS(t *testing.T) { Names: []string{"_mysql._tcp.db.example.com."}, RefreshInterval: model.Duration(time.Minute), }, - lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { + lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { return &dns.Msg{}, nil }, expected: []*targetgroup.Group{ @@ -215,7 +215,7 @@ func TestDNS(t *testing.T) { Port: 25, RefreshInterval: model.Duration(time.Minute), }, - lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { + lookup: 
func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ &dns.MX{Preference: 0, Mx: "smtp1.example.com."}, diff --git a/discovery/eureka/eureka.go b/discovery/eureka/eureka.go index 779c081aee3..5087346486d 100644 --- a/discovery/eureka/eureka.go +++ b/discovery/eureka/eureka.go @@ -17,13 +17,13 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "net/http" "net/url" "strconv" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -126,7 +126,7 @@ type Discovery struct { } // NewDiscovery creates a new Eureka discovery for the given role. -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*eurekaMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/file/file.go b/discovery/file/file.go index e7e9d0870fc..1c36b254cca 100644 --- a/discovery/file/file.go +++ b/discovery/file/file.go @@ -19,6 +19,7 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "path/filepath" "strings" @@ -26,12 +27,11 @@ import ( "time" "github.com/fsnotify/fsnotify" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/grafana/regexp" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/discovery" @@ -175,20 +175,20 @@ type Discovery struct { // and how many target groups they contained. // This is used to detect deleted target groups. lastRefresh map[string]int - logger log.Logger + logger *slog.Logger metrics *fileMetrics } // NewDiscovery returns a new file discovery for the given paths. -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { fm, ok := metrics.(*fileMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } disc := &Discovery{ @@ -210,7 +210,7 @@ func (d *Discovery) listFiles() []string { for _, p := range d.paths { files, err := filepath.Glob(p) if err != nil { - level.Error(d.logger).Log("msg", "Error expanding glob", "glob", p, "err", err) + d.logger.Error("Error expanding glob", "glob", p, "err", err) continue } paths = append(paths, files...) 
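The hunks in discovery/file/file.go above and below, like most hunks in this patch, apply the same go-kit log to log/slog migration. A minimal, self-contained sketch of that pattern follows; it is not part of the patch, and the glob path and error value are purely illustrative:

package main

import (
	"errors"
	"log/slog"
	"os"

	"github.com/prometheus/common/promslog"
)

func main() {
	// Old go-kit style, as removed in the hunks above:
	//   level.Error(d.logger).Log("msg", "Error expanding glob", "glob", p, "err", err)
	// New log/slog style, as added in the hunks above: keys and values are passed directly.
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
	logger.Error("Error expanding glob", "glob", "/etc/sd/*.json", "err", errors.New("permission denied"))

	// Nil loggers are now defaulted with promslog.NewNopLogger() instead of log.NewNopLogger(),
	// matching the NewDiscovery constructors changed throughout this patch.
	nop := promslog.NewNopLogger()
	nop.Debug("discarded")
}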
@@ -231,7 +231,7 @@ func (d *Discovery) watchFiles() { p = "./" } if err := d.watcher.Add(p); err != nil { - level.Error(d.logger).Log("msg", "Error adding file watch", "path", p, "err", err) + d.logger.Error("Error adding file watch", "path", p, "err", err) } } } @@ -240,7 +240,7 @@ func (d *Discovery) watchFiles() { func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { watcher, err := fsnotify.NewWatcher() if err != nil { - level.Error(d.logger).Log("msg", "Error adding file watcher", "err", err) + d.logger.Error("Error adding file watcher", "err", err) d.metrics.fileWatcherErrorsCount.Inc() return } @@ -280,7 +280,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { case err := <-d.watcher.Errors: if err != nil { - level.Error(d.logger).Log("msg", "Error watching file", "err", err) + d.logger.Error("Error watching file", "err", err) } } } @@ -300,7 +300,7 @@ func (d *Discovery) deleteTimestamp(filename string) { // stop shuts down the file watcher. func (d *Discovery) stop() { - level.Debug(d.logger).Log("msg", "Stopping file discovery...", "paths", fmt.Sprintf("%v", d.paths)) + d.logger.Debug("Stopping file discovery...", "paths", fmt.Sprintf("%v", d.paths)) done := make(chan struct{}) defer close(done) @@ -320,10 +320,10 @@ func (d *Discovery) stop() { } }() if err := d.watcher.Close(); err != nil { - level.Error(d.logger).Log("msg", "Error closing file watcher", "paths", fmt.Sprintf("%v", d.paths), "err", err) + d.logger.Error("Error closing file watcher", "paths", fmt.Sprintf("%v", d.paths), "err", err) } - level.Debug(d.logger).Log("msg", "File discovery stopped") + d.logger.Debug("File discovery stopped") } // refresh reads all files matching the discovery's patterns and sends the respective @@ -339,7 +339,7 @@ func (d *Discovery) refresh(ctx context.Context, ch chan<- []*targetgroup.Group) if err != nil { d.metrics.fileSDReadErrorsCount.Inc() - level.Error(d.logger).Log("msg", "Error reading file", "path", p, "err", err) + d.logger.Error("Error reading file", "path", p, "err", err) // Prevent deletion down below. ref[p] = d.lastRefresh[p] continue @@ -356,7 +356,7 @@ func (d *Discovery) refresh(ctx context.Context, ch chan<- []*targetgroup.Group) for f, n := range d.lastRefresh { m, ok := ref[f] if !ok || n > m { - level.Debug(d.logger).Log("msg", "file_sd refresh found file that should be removed", "file", f) + d.logger.Debug("file_sd refresh found file that should be removed", "file", f) d.deleteTimestamp(f) for i := m; i < n; i++ { select { diff --git a/discovery/gce/gce.go b/discovery/gce/gce.go index 15f32dd2473..a509a144e19 100644 --- a/discovery/gce/gce.go +++ b/discovery/gce/gce.go @@ -17,12 +17,12 @@ import ( "context" "errors" "fmt" + "log/slog" "net/http" "strconv" "strings" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "golang.org/x/oauth2/google" @@ -129,7 +129,7 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
-func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*gceMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/hetzner/hcloud.go b/discovery/hetzner/hcloud.go index df56f94c5fb..ba64250c0fa 100644 --- a/discovery/hetzner/hcloud.go +++ b/discovery/hetzner/hcloud.go @@ -15,12 +15,12 @@ package hetzner import ( "context" + "log/slog" "net" "net/http" "strconv" "time" - "github.com/go-kit/log" "github.com/hetznercloud/hcloud-go/v2/hcloud" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -58,7 +58,7 @@ type hcloudDiscovery struct { } // newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets. -func newHcloudDiscovery(conf *SDConfig, _ log.Logger) (*hcloudDiscovery, error) { +func newHcloudDiscovery(conf *SDConfig, _ *slog.Logger) (*hcloudDiscovery, error) { d := &hcloudDiscovery{ port: conf.Port, } diff --git a/discovery/hetzner/hcloud_test.go b/discovery/hetzner/hcloud_test.go index 10b799037ad..fa8291625ab 100644 --- a/discovery/hetzner/hcloud_test.go +++ b/discovery/hetzner/hcloud_test.go @@ -18,8 +18,8 @@ import ( "fmt" "testing" - "github.com/go-kit/log" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" ) @@ -43,7 +43,7 @@ func TestHCloudSDRefresh(t *testing.T) { cfg.HTTPClientConfig.BearerToken = hcloudTestToken cfg.hcloudEndpoint = suite.Mock.Endpoint() - d, err := newHcloudDiscovery(&cfg, log.NewNopLogger()) + d, err := newHcloudDiscovery(&cfg, promslog.NewNopLogger()) require.NoError(t, err) targetGroups, err := d.refresh(context.Background()) diff --git a/discovery/hetzner/hetzner.go b/discovery/hetzner/hetzner.go index 69c823d3829..980c197d773 100644 --- a/discovery/hetzner/hetzner.go +++ b/discovery/hetzner/hetzner.go @@ -17,9 +17,9 @@ import ( "context" "errors" "fmt" + "log/slog" "time" - "github.com/go-kit/log" "github.com/hetznercloud/hcloud-go/v2/hcloud" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" @@ -135,7 +135,7 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { m, ok := metrics.(*hetznerMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") @@ -157,7 +157,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.Discovere ), nil } -func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) { +func newRefresher(conf *SDConfig, l *slog.Logger) (refresher, error) { switch conf.Role { case HetznerRoleHcloud: if conf.hcloudEndpoint == "" { diff --git a/discovery/hetzner/robot.go b/discovery/hetzner/robot.go index 516470b05ad..958f8f710f5 100644 --- a/discovery/hetzner/robot.go +++ b/discovery/hetzner/robot.go @@ -18,13 +18,13 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "net" "net/http" "strconv" "strings" "time" - "github.com/go-kit/log" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" @@ -51,7 +51,7 @@ type robotDiscovery struct { } // newRobotDiscovery returns a new robotDiscovery which periodically refreshes its targets. -func newRobotDiscovery(conf *SDConfig, _ log.Logger) (*robotDiscovery, error) { +func newRobotDiscovery(conf *SDConfig, _ *slog.Logger) (*robotDiscovery, error) { d := &robotDiscovery{ port: conf.Port, endpoint: conf.robotEndpoint, diff --git a/discovery/hetzner/robot_test.go b/discovery/hetzner/robot_test.go index abee5fea900..2618bd097cc 100644 --- a/discovery/hetzner/robot_test.go +++ b/discovery/hetzner/robot_test.go @@ -18,9 +18,9 @@ import ( "fmt" "testing" - "github.com/go-kit/log" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" ) @@ -42,7 +42,7 @@ func TestRobotSDRefresh(t *testing.T) { cfg.HTTPClientConfig.BasicAuth = &config.BasicAuth{Username: robotTestUsername, Password: robotTestPassword} cfg.robotEndpoint = suite.Mock.Endpoint() - d, err := newRobotDiscovery(&cfg, log.NewNopLogger()) + d, err := newRobotDiscovery(&cfg, promslog.NewNopLogger()) require.NoError(t, err) targetGroups, err := d.refresh(context.Background()) @@ -91,12 +91,11 @@ func TestRobotSDRefreshHandleError(t *testing.T) { cfg := DefaultSDConfig cfg.robotEndpoint = suite.Mock.Endpoint() - d, err := newRobotDiscovery(&cfg, log.NewNopLogger()) + d, err := newRobotDiscovery(&cfg, promslog.NewNopLogger()) require.NoError(t, err) targetGroups, err := d.refresh(context.Background()) - require.Error(t, err) - require.Equal(t, "non 2xx status '401' response during hetzner service discovery with role robot", err.Error()) + require.EqualError(t, err, "non 2xx status '401' response during hetzner service discovery with role robot") require.Empty(t, targetGroups) } diff --git a/discovery/http/http.go b/discovery/http/http.go index ff76fd76274..65404694c43 100644 --- a/discovery/http/http.go +++ b/discovery/http/http.go @@ -19,17 +19,18 @@ import ( "errors" "fmt" "io" + "log/slog" "net/http" "net/url" "strconv" "strings" "time" - "github.com/go-kit/log" "github.com/grafana/regexp" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" "github.com/prometheus/prometheus/discovery" @@ -40,8 +41,8 @@ import ( var ( // DefaultSDConfig is the default HTTP SD 
configuration. DefaultSDConfig = SDConfig{ - RefreshInterval: model.Duration(60 * time.Second), HTTPClientConfig: config.DefaultHTTPClientConfig, + RefreshInterval: model.Duration(60 * time.Second), } userAgent = fmt.Sprintf("Prometheus/%s", version.Version) matchContentType = regexp.MustCompile(`^(?i:application\/json(;\s*charset=("utf-8"|utf-8))?)$`) @@ -114,14 +115,14 @@ type Discovery struct { } // NewDiscovery returns a new HTTP discovery for the given config. -func NewDiscovery(conf *SDConfig, logger log.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*httpMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http", clientOpts...) diff --git a/discovery/http/http_test.go b/discovery/http/http_test.go index 0cafe035dc3..9d3a3fb5e76 100644 --- a/discovery/http/http_test.go +++ b/discovery/http/http_test.go @@ -21,11 +21,11 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" @@ -49,7 +49,7 @@ func TestHTTPValidRefresh(t *testing.T) { require.NoError(t, metrics.Register()) defer metrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics) require.NoError(t, err) ctx := context.Background() @@ -94,7 +94,7 @@ func TestHTTPInvalidCode(t *testing.T) { require.NoError(t, metrics.Register()) defer metrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics) require.NoError(t, err) ctx := context.Background() @@ -123,7 +123,7 @@ func TestHTTPInvalidFormat(t *testing.T) { require.NoError(t, metrics.Register()) defer metrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics) require.NoError(t, err) ctx := context.Background() @@ -442,7 +442,7 @@ func TestSourceDisappeared(t *testing.T) { require.NoError(t, metrics.Register()) defer metrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics) require.NoError(t, err) for _, test := range cases { ctx := context.Background() diff --git a/discovery/ionos/ionos.go b/discovery/ionos/ionos.go index c8b4f7f8e54..1aa21667e35 100644 --- a/discovery/ionos/ionos.go +++ b/discovery/ionos/ionos.go @@ -16,9 +16,9 @@ package ionos import ( "errors" "fmt" + "log/slog" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -43,7 +43,7 @@ func init() { type Discovery struct{} // NewDiscovery returns a new refresh.Discovery for IONOS Cloud. 
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { m, ok := metrics.(*ionosMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/ionos/server.go b/discovery/ionos/server.go index a850fbbfb42..18e89b1d431 100644 --- a/discovery/ionos/server.go +++ b/discovery/ionos/server.go @@ -16,13 +16,13 @@ package ionos import ( "context" "fmt" + "log/slog" "net" "net/http" "strconv" "strings" "time" - "github.com/go-kit/log" ionoscloud "github.com/ionos-cloud/sdk-go/v6" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -60,7 +60,7 @@ type serverDiscovery struct { datacenterID string } -func newServerDiscovery(conf *SDConfig, _ log.Logger) (*serverDiscovery, error) { +func newServerDiscovery(conf *SDConfig, _ *slog.Logger) (*serverDiscovery, error) { d := &serverDiscovery{ port: conf.Port, datacenterID: conf.DatacenterID, diff --git a/discovery/kubernetes/endpoints.go b/discovery/kubernetes/endpoints.go index 927f7f401cd..14d3bc7a99b 100644 --- a/discovery/kubernetes/endpoints.go +++ b/discovery/kubernetes/endpoints.go @@ -17,13 +17,13 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -33,7 +33,7 @@ import ( // Endpoints discovers new endpoint targets. type Endpoints struct { - logger log.Logger + logger *slog.Logger endpointsInf cache.SharedIndexInformer serviceInf cache.SharedInformer @@ -49,9 +49,9 @@ type Endpoints struct { } // NewEndpoints returns a new endpoints discovery. 
-func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *Endpoints { +func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *Endpoints { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } epAddCount := eventCount.WithLabelValues(RoleEndpoint.String(), MetricLabelRoleAdd) @@ -92,26 +92,23 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca }, }) if err != nil { - level.Error(l).Log("msg", "Error adding endpoints event handler.", "err", err) + l.Error("Error adding endpoints event handler.", "err", err) } serviceUpdate := func(o interface{}) { svc, err := convertToService(o) if err != nil { - level.Error(e.logger).Log("msg", "converting to Service object failed", "err", err) + e.logger.Error("converting to Service object failed", "err", err) return } - ep := &apiv1.Endpoints{} - ep.Namespace = svc.Namespace - ep.Name = svc.Name - obj, exists, err := e.endpointsStore.Get(ep) + obj, exists, err := e.endpointsStore.GetByKey(namespacedName(svc.Namespace, svc.Name)) if exists && err == nil { e.enqueue(obj.(*apiv1.Endpoints)) } if err != nil { - level.Error(e.logger).Log("msg", "retrieving endpoints failed", "err", err) + e.logger.Error("retrieving endpoints failed", "err", err) } } _, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -131,7 +128,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca }, }) if err != nil { - level.Error(l).Log("msg", "Error adding services event handler.", "err", err) + l.Error("Error adding services event handler.", "err", err) } _, err = e.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ UpdateFunc: func(old, cur interface{}) { @@ -154,7 +151,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca }, }) if err != nil { - level.Error(l).Log("msg", "Error adding pods event handler.", "err", err) + l.Error("Error adding pods event handler.", "err", err) } if e.withNodeMetadata { _, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -169,13 +166,13 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca DeleteFunc: func(o interface{}) { nodeName, err := nodeName(o) if err != nil { - level.Error(l).Log("msg", "Error getting Node name", "err", err) + l.Error("Error getting Node name", "err", err) } e.enqueueNode(nodeName) }, }) if err != nil { - level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err) + l.Error("Error adding nodes event handler.", "err", err) } } @@ -185,7 +182,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca func (e *Endpoints) enqueueNode(nodeName string) { endpoints, err := e.endpointsInf.GetIndexer().ByIndex(nodeIndex, nodeName) if err != nil { - level.Error(e.logger).Log("msg", "Error getting endpoints for node", "node", nodeName, "err", err) + e.logger.Error("Error getting endpoints for node", "node", nodeName, "err", err) return } @@ -197,7 +194,7 @@ func (e *Endpoints) enqueueNode(nodeName string) { func (e *Endpoints) enqueuePod(podNamespacedName string) { endpoints, err := e.endpointsInf.GetIndexer().ByIndex(podIndex, podNamespacedName) if err != nil { - level.Error(e.logger).Log("msg", "Error getting endpoints for pod", "pod", podNamespacedName, "err", err) + e.logger.Error("Error getting endpoints for pod", "pod", podNamespacedName, "err", err) 
return } @@ -226,7 +223,7 @@ func (e *Endpoints) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(e.logger).Log("msg", "endpoints informer unable to sync cache") + e.logger.Error("endpoints informer unable to sync cache") } return } @@ -250,13 +247,13 @@ func (e *Endpoints) process(ctx context.Context, ch chan<- []*targetgroup.Group) namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { - level.Error(e.logger).Log("msg", "splitting key failed", "key", key) + e.logger.Error("splitting key failed", "key", key) return true } o, exists, err := e.endpointsStore.GetByKey(key) if err != nil { - level.Error(e.logger).Log("msg", "getting object from store failed", "key", key) + e.logger.Error("getting object from store failed", "key", key) return true } if !exists { @@ -265,7 +262,7 @@ func (e *Endpoints) process(ctx context.Context, ch chan<- []*targetgroup.Group) } eps, err := convertToEndpoints(o) if err != nil { - level.Error(e.logger).Log("msg", "converting to Endpoints object failed", "err", err) + e.logger.Error("converting to Endpoints object failed", "err", err) return true } send(ctx, ch, e.buildEndpoints(eps)) @@ -364,16 +361,19 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { target = target.Merge(podLabels(pod)) // Attach potential container port labels matching the endpoint port. - for _, c := range pod.Spec.Containers { + containers := append(pod.Spec.Containers, pod.Spec.InitContainers...) + for i, c := range containers { for _, cport := range c.Ports { if port.Port == cport.ContainerPort { ports := strconv.FormatUint(uint64(port.Port), 10) + isInit := i >= len(pod.Spec.Containers) target[podContainerNameLabel] = lv(c.Name) target[podContainerImageLabel] = lv(c.Image) target[podContainerPortNameLabel] = lv(cport.Name) target[podContainerPortNumberLabel] = lv(ports) target[podContainerPortProtocolLabel] = lv(string(port.Protocol)) + target[podContainerIsInit] = lv(strconv.FormatBool(isInit)) break } } @@ -400,10 +400,10 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { v := eps.Labels[apiv1.EndpointsOverCapacity] if v == "truncated" { - level.Warn(e.logger).Log("msg", "Number of endpoints in one Endpoints object exceeds 1000 and has been truncated, please use \"role: endpointslice\" instead", "endpoint", eps.Name) + e.logger.Warn("Number of endpoints in one Endpoints object exceeds 1000 and has been truncated, please use \"role: endpointslice\" instead", "endpoint", eps.Name) } if v == "warning" { - level.Warn(e.logger).Log("msg", "Number of endpoints in one Endpoints object exceeds 1000, please use \"role: endpointslice\" instead", "endpoint", eps.Name) + e.logger.Warn("Number of endpoints in one Endpoints object exceeds 1000, please use \"role: endpointslice\" instead", "endpoint", eps.Name) } // For all seen pods, check all container ports. If they were not covered @@ -414,7 +414,8 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { continue } - for _, c := range pe.pod.Spec.Containers { + containers := append(pe.pod.Spec.Containers, pe.pod.Spec.InitContainers...) 
+ for i, c := range containers { for _, cport := range c.Ports { hasSeenPort := func() bool { for _, eport := range pe.servicePorts { @@ -431,6 +432,7 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) + isInit := i >= len(pe.pod.Spec.Containers) target := model.LabelSet{ model.AddressLabel: lv(a), podContainerNameLabel: lv(c.Name), @@ -438,6 +440,7 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { podContainerPortNameLabel: lv(cport.Name), podContainerPortNumberLabel: lv(ports), podContainerPortProtocolLabel: lv(string(cport.Protocol)), + podContainerIsInit: lv(strconv.FormatBool(isInit)), } tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } @@ -451,13 +454,10 @@ func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod { if ref == nil || ref.Kind != "Pod" { return nil } - p := &apiv1.Pod{} - p.Namespace = ref.Namespace - p.Name = ref.Name - obj, exists, err := e.podStore.Get(p) + obj, exists, err := e.podStore.GetByKey(namespacedName(ref.Namespace, ref.Name)) if err != nil { - level.Error(e.logger).Log("msg", "resolving pod ref failed", "err", err) + e.logger.Error("resolving pod ref failed", "err", err) return nil } if !exists { @@ -467,31 +467,27 @@ func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod { } func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) { - svc := &apiv1.Service{} - svc.Namespace = ns - svc.Name = name - - obj, exists, err := e.serviceStore.Get(svc) + obj, exists, err := e.serviceStore.GetByKey(namespacedName(ns, name)) if err != nil { - level.Error(e.logger).Log("msg", "retrieving service failed", "err", err) + e.logger.Error("retrieving service failed", "err", err) return } if !exists { return } - svc = obj.(*apiv1.Service) + svc := obj.(*apiv1.Service) tg.Labels = tg.Labels.Merge(serviceLabels(svc)) } -func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger log.Logger, nodeName *string) model.LabelSet { +func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger *slog.Logger, nodeName *string) model.LabelSet { if nodeName == nil { return tg } obj, exists, err := nodeInf.GetStore().GetByKey(*nodeName) if err != nil { - level.Error(logger).Log("msg", "Error getting node", "node", *nodeName, "err", err) + logger.Error("Error getting node", "node", *nodeName, "err", err) return tg } diff --git a/discovery/kubernetes/endpoints_test.go b/discovery/kubernetes/endpoints_test.go index 3ea98c5db97..a1ac6e5d48b 100644 --- a/discovery/kubernetes/endpoints_test.go +++ b/discovery/kubernetes/endpoints_test.go @@ -18,10 +18,12 @@ import ( "testing" "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/cache" "github.com/prometheus/prometheus/discovery/targetgroup" ) @@ -244,6 +246,7 @@ func TestEndpointsDiscoveryAdd(t *testing.T) { "__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, { "__address__": "1.2.3.4:9001", @@ -259,6 +262,7 @@ func TestEndpointsDiscoveryAdd(t *testing.T) { 
"__meta_kubernetes_pod_container_port_number": "9001", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ @@ -821,6 +825,7 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) { "__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ @@ -1078,6 +1083,7 @@ func TestEndpointsDiscoveryUpdatePod(t *testing.T) { "__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ @@ -1089,3 +1095,186 @@ func TestEndpointsDiscoveryUpdatePod(t *testing.T) { }, }.Run(t) } + +func TestEndpointsDiscoverySidecarContainer(t *testing.T) { + objs := []runtime.Object{ + &v1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testsidecar", + Namespace: "default", + }, + Subsets: []v1.EndpointSubset{ + { + Addresses: []v1.EndpointAddress{ + { + IP: "4.3.2.1", + TargetRef: &v1.ObjectReference{ + Kind: "Pod", + Name: "testpod", + Namespace: "default", + }, + }, + }, + Ports: []v1.EndpointPort{ + { + Name: "testport", + Port: 9000, + Protocol: v1.ProtocolTCP, + }, + { + Name: "initport", + Port: 9111, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + }, + &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + Namespace: "default", + UID: types.UID("deadbeef"), + }, + Spec: v1.PodSpec{ + NodeName: "testnode", + InitContainers: []v1.Container{ + { + Name: "ic1", + Image: "ic1:latest", + Ports: []v1.ContainerPort{ + { + Name: "initport", + ContainerPort: 1111, + Protocol: v1.ProtocolTCP, + }, + }, + }, + { + Name: "ic2", + Image: "ic2:latest", + Ports: []v1.ContainerPort{ + { + Name: "initport", + ContainerPort: 9111, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + Containers: []v1.Container{ + { + Name: "c1", + Image: "c1:latest", + Ports: []v1.ContainerPort{ + { + Name: "mainport", + ContainerPort: 9000, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + }, + Status: v1.PodStatus{ + HostIP: "2.3.4.5", + PodIP: "4.3.2.1", + }, + }, + } + + n, _ := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, objs...) 
+ + k8sDiscoveryTest{ + discovery: n, + expectedMaxItems: 1, + expectedRes: map[string]*targetgroup.Group{ + "endpoints/default/testsidecar": { + Targets: []model.LabelSet{ + { + "__address__": "4.3.2.1:9000", + "__meta_kubernetes_endpoint_address_target_kind": "Pod", + "__meta_kubernetes_endpoint_address_target_name": "testpod", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + "__meta_kubernetes_pod_container_image": "c1:latest", + "__meta_kubernetes_pod_container_name": "c1", + "__meta_kubernetes_pod_container_port_name": "mainport", + "__meta_kubernetes_pod_container_port_number": "9000", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", + }, + { + "__address__": "4.3.2.1:9111", + "__meta_kubernetes_endpoint_address_target_kind": "Pod", + "__meta_kubernetes_endpoint_address_target_name": "testpod", + "__meta_kubernetes_endpoint_port_name": "initport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + "__meta_kubernetes_pod_container_image": "ic2:latest", + "__meta_kubernetes_pod_container_name": "ic2", + "__meta_kubernetes_pod_container_port_name": "initport", + "__meta_kubernetes_pod_container_port_number": "9111", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "true", + }, + { + "__address__": "4.3.2.1:1111", + "__meta_kubernetes_pod_container_image": "ic1:latest", + "__meta_kubernetes_pod_container_name": "ic1", + "__meta_kubernetes_pod_container_port_name": "initport", + "__meta_kubernetes_pod_container_port_number": "1111", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "true", + }, + }, + Labels: model.LabelSet{ + "__meta_kubernetes_endpoints_name": "testsidecar", + "__meta_kubernetes_namespace": "default", + }, + Source: "endpoints/default/testsidecar", + }, + }, + }.Run(t) +} + +func BenchmarkResolvePodRef(b *testing.B) { + indexer := cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, nil) + e := &Endpoints{ + podStore: indexer, + } + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + p := e.resolvePodRef(&v1.ObjectReference{ + Kind: "Pod", + Name: "testpod", + Namespace: "foo", + }) + require.Nil(b, p) + } +} diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go index 2e57e9e554c..45bc43eff99 100644 --- a/discovery/kubernetes/endpointslice.go +++ b/discovery/kubernetes/endpointslice.go @@ -17,16 +17,15 @@ 
import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/api/discovery/v1" - "k8s.io/api/discovery/v1beta1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -36,7 +35,7 @@ import ( // EndpointSlice discovers new endpoint targets. type EndpointSlice struct { - logger log.Logger + logger *slog.Logger endpointSliceInf cache.SharedIndexInformer serviceInf cache.SharedInformer @@ -52,9 +51,9 @@ type EndpointSlice struct { } // NewEndpointSlice returns a new endpointslice discovery. -func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *EndpointSlice { +func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *EndpointSlice { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } epslAddCount := eventCount.WithLabelValues(RoleEndpointSlice.String(), MetricLabelRoleAdd) @@ -93,23 +92,23 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod }, }) if err != nil { - level.Error(l).Log("msg", "Error adding endpoint slices event handler.", "err", err) + l.Error("Error adding endpoint slices event handler.", "err", err) } serviceUpdate := func(o interface{}) { svc, err := convertToService(o) if err != nil { - level.Error(e.logger).Log("msg", "converting to Service object failed", "err", err) + e.logger.Error("converting to Service object failed", "err", err) return } - // TODO(brancz): use cache.Indexer to index endpoints by - // disv1beta1.LabelServiceName so this operation doesn't have to - // iterate over all endpoint objects. + // TODO(brancz): use cache.Indexer to index endpointslices by + // LabelServiceName so this operation doesn't have to iterate over all + // endpoint objects. 
for _, obj := range e.endpointSliceStore.List() { esa, err := e.getEndpointSliceAdaptor(obj) if err != nil { - level.Error(e.logger).Log("msg", "converting to EndpointSlice object failed", "err", err) + e.logger.Error("converting to EndpointSlice object failed", "err", err) continue } if lv, exists := esa.labels()[esa.labelServiceName()]; exists && lv == svc.Name { @@ -132,7 +131,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod }, }) if err != nil { - level.Error(l).Log("msg", "Error adding services event handler.", "err", err) + l.Error("Error adding services event handler.", "err", err) } if e.withNodeMetadata { @@ -148,13 +147,13 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod DeleteFunc: func(o interface{}) { nodeName, err := nodeName(o) if err != nil { - level.Error(l).Log("msg", "Error getting Node name", "err", err) + l.Error("Error getting Node name", "err", err) } e.enqueueNode(nodeName) }, }) if err != nil { - level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err) + l.Error("Error adding nodes event handler.", "err", err) } } @@ -164,7 +163,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod func (e *EndpointSlice) enqueueNode(nodeName string) { endpoints, err := e.endpointSliceInf.GetIndexer().ByIndex(nodeIndex, nodeName) if err != nil { - level.Error(e.logger).Log("msg", "Error getting endpoints for node", "node", nodeName, "err", err) + e.logger.Error("Error getting endpoints for node", "node", nodeName, "err", err) return } @@ -192,7 +191,7 @@ func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group) } if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(e.logger).Log("msg", "endpointslice informer unable to sync cache") + e.logger.Error("endpointslice informer unable to sync cache") } return } @@ -216,13 +215,13 @@ func (e *EndpointSlice) process(ctx context.Context, ch chan<- []*targetgroup.Gr namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { - level.Error(e.logger).Log("msg", "splitting key failed", "key", key) + e.logger.Error("splitting key failed", "key", key) return true } o, exists, err := e.endpointSliceStore.GetByKey(key) if err != nil { - level.Error(e.logger).Log("msg", "getting object from store failed", "key", key) + e.logger.Error("getting object from store failed", "key", key) return true } if !exists { @@ -232,7 +231,7 @@ func (e *EndpointSlice) process(ctx context.Context, ch chan<- []*targetgroup.Gr esa, err := e.getEndpointSliceAdaptor(o) if err != nil { - level.Error(e.logger).Log("msg", "converting to EndpointSlice object failed", "err", err) + e.logger.Error("converting to EndpointSlice object failed", "err", err) return true } @@ -244,8 +243,6 @@ func (e *EndpointSlice) getEndpointSliceAdaptor(o interface{}) (endpointSliceAda switch endpointSlice := o.(type) { case *v1.EndpointSlice: return newEndpointSliceAdaptorFromV1(endpointSlice), nil - case *v1beta1.EndpointSlice: - return newEndpointSliceAdaptorFromV1beta1(endpointSlice), nil default: return nil, fmt.Errorf("received unexpected object: %v", o) } @@ -383,19 +380,23 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou target = target.Merge(podLabels(pod)) // Attach potential container port labels matching the endpoint port. - for _, c := range pod.Spec.Containers { + containers := append(pod.Spec.Containers, pod.Spec.InitContainers...) 
+ for i, c := range containers { for _, cport := range c.Ports { if port.port() == nil { continue } + if *port.port() == cport.ContainerPort { ports := strconv.FormatUint(uint64(*port.port()), 10) + isInit := i >= len(pod.Spec.Containers) target[podContainerNameLabel] = lv(c.Name) target[podContainerImageLabel] = lv(c.Image) target[podContainerPortNameLabel] = lv(cport.Name) target[podContainerPortNumberLabel] = lv(ports) target[podContainerPortProtocolLabel] = lv(string(cport.Protocol)) + target[podContainerIsInit] = lv(strconv.FormatBool(isInit)) break } } @@ -423,7 +424,8 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou continue } - for _, c := range pe.pod.Spec.Containers { + containers := append(pe.pod.Spec.Containers, pe.pod.Spec.InitContainers...) + for i, c := range containers { for _, cport := range c.Ports { hasSeenPort := func() bool { for _, eport := range pe.servicePorts { @@ -443,6 +445,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) + isInit := i >= len(pe.pod.Spec.Containers) target := model.LabelSet{ model.AddressLabel: lv(a), podContainerNameLabel: lv(c.Name), @@ -450,6 +453,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou podContainerPortNameLabel: lv(cport.Name), podContainerPortNumberLabel: lv(ports), podContainerPortProtocolLabel: lv(string(cport.Protocol)), + podContainerIsInit: lv(strconv.FormatBool(isInit)), } tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } @@ -463,13 +467,10 @@ func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod { if ref == nil || ref.Kind != "Pod" { return nil } - p := &apiv1.Pod{} - p.Namespace = ref.Namespace - p.Name = ref.Name - obj, exists, err := e.podStore.Get(p) + obj, exists, err := e.podStore.GetByKey(namespacedName(ref.Namespace, ref.Name)) if err != nil { - level.Error(e.logger).Log("msg", "resolving pod ref failed", "err", err) + e.logger.Error("resolving pod ref failed", "err", err) return nil } if !exists { @@ -480,27 +481,27 @@ func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod { func (e *EndpointSlice) addServiceLabels(esa endpointSliceAdaptor, tg *targetgroup.Group) { var ( - svc = &apiv1.Service{} found bool + name string ) - svc.Namespace = esa.namespace() + ns := esa.namespace() // Every EndpointSlice object has the Service they belong to in the // kubernetes.io/service-name label. 
- svc.Name, found = esa.labels()[esa.labelServiceName()] + name, found = esa.labels()[esa.labelServiceName()] if !found { return } - obj, exists, err := e.serviceStore.Get(svc) + obj, exists, err := e.serviceStore.GetByKey(namespacedName(ns, name)) if err != nil { - level.Error(e.logger).Log("msg", "retrieving service failed", "err", err) + e.logger.Error("retrieving service failed", "err", err) return } if !exists { return } - svc = obj.(*apiv1.Service) + svc := obj.(*apiv1.Service) tg.Labels = tg.Labels.Merge(serviceLabels(svc)) } diff --git a/discovery/kubernetes/endpointslice_adaptor.go b/discovery/kubernetes/endpointslice_adaptor.go index edd64fcb327..81243e2ce0a 100644 --- a/discovery/kubernetes/endpointslice_adaptor.go +++ b/discovery/kubernetes/endpointslice_adaptor.go @@ -16,7 +16,6 @@ package kubernetes import ( corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/discovery/v1" - "k8s.io/api/discovery/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -109,59 +108,6 @@ func (e *endpointSliceAdaptorV1) labelServiceName() string { return v1.LabelServiceName } -// Adaptor for k8s.io/api/discovery/v1beta1. -type endpointSliceAdaptorV1Beta1 struct { - endpointSlice *v1beta1.EndpointSlice -} - -func newEndpointSliceAdaptorFromV1beta1(endpointSlice *v1beta1.EndpointSlice) endpointSliceAdaptor { - return &endpointSliceAdaptorV1Beta1{endpointSlice: endpointSlice} -} - -func (e *endpointSliceAdaptorV1Beta1) get() interface{} { - return e.endpointSlice -} - -func (e *endpointSliceAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta { - return e.endpointSlice.ObjectMeta -} - -func (e *endpointSliceAdaptorV1Beta1) name() string { - return e.endpointSlice.Name -} - -func (e *endpointSliceAdaptorV1Beta1) namespace() string { - return e.endpointSlice.Namespace -} - -func (e *endpointSliceAdaptorV1Beta1) addressType() string { - return string(e.endpointSlice.AddressType) -} - -func (e *endpointSliceAdaptorV1Beta1) endpoints() []endpointSliceEndpointAdaptor { - eps := make([]endpointSliceEndpointAdaptor, 0, len(e.endpointSlice.Endpoints)) - for i := 0; i < len(e.endpointSlice.Endpoints); i++ { - eps = append(eps, newEndpointSliceEndpointAdaptorFromV1beta1(e.endpointSlice.Endpoints[i])) - } - return eps -} - -func (e *endpointSliceAdaptorV1Beta1) ports() []endpointSlicePortAdaptor { - ports := make([]endpointSlicePortAdaptor, 0, len(e.endpointSlice.Ports)) - for i := 0; i < len(e.endpointSlice.Ports); i++ { - ports = append(ports, newEndpointSlicePortAdaptorFromV1beta1(e.endpointSlice.Ports[i])) - } - return ports -} - -func (e *endpointSliceAdaptorV1Beta1) labels() map[string]string { - return e.endpointSlice.Labels -} - -func (e *endpointSliceAdaptorV1Beta1) labelServiceName() string { - return v1beta1.LabelServiceName -} - type endpointSliceEndpointAdaptorV1 struct { endpoint v1.Endpoint } @@ -218,62 +164,6 @@ func (e *endpointSliceEndpointConditionsAdaptorV1) terminating() *bool { return e.endpointConditions.Terminating } -type endpointSliceEndpointAdaptorV1beta1 struct { - endpoint v1beta1.Endpoint -} - -func newEndpointSliceEndpointAdaptorFromV1beta1(endpoint v1beta1.Endpoint) endpointSliceEndpointAdaptor { - return &endpointSliceEndpointAdaptorV1beta1{endpoint: endpoint} -} - -func (e *endpointSliceEndpointAdaptorV1beta1) addresses() []string { - return e.endpoint.Addresses -} - -func (e *endpointSliceEndpointAdaptorV1beta1) hostname() *string { - return e.endpoint.Hostname -} - -func (e *endpointSliceEndpointAdaptorV1beta1) nodename() *string { - return e.endpoint.NodeName -} - -func (e 
*endpointSliceEndpointAdaptorV1beta1) zone() *string { - return nil -} - -func (e *endpointSliceEndpointAdaptorV1beta1) conditions() endpointSliceEndpointConditionsAdaptor { - return newEndpointSliceEndpointConditionsAdaptorFromV1beta1(e.endpoint.Conditions) -} - -func (e *endpointSliceEndpointAdaptorV1beta1) targetRef() *corev1.ObjectReference { - return e.endpoint.TargetRef -} - -func (e *endpointSliceEndpointAdaptorV1beta1) topology() map[string]string { - return e.endpoint.Topology -} - -type endpointSliceEndpointConditionsAdaptorV1beta1 struct { - endpointConditions v1beta1.EndpointConditions -} - -func newEndpointSliceEndpointConditionsAdaptorFromV1beta1(endpointConditions v1beta1.EndpointConditions) endpointSliceEndpointConditionsAdaptor { - return &endpointSliceEndpointConditionsAdaptorV1beta1{endpointConditions: endpointConditions} -} - -func (e *endpointSliceEndpointConditionsAdaptorV1beta1) ready() *bool { - return e.endpointConditions.Ready -} - -func (e *endpointSliceEndpointConditionsAdaptorV1beta1) serving() *bool { - return e.endpointConditions.Serving -} - -func (e *endpointSliceEndpointConditionsAdaptorV1beta1) terminating() *bool { - return e.endpointConditions.Terminating -} - type endpointSlicePortAdaptorV1 struct { endpointPort v1.EndpointPort } @@ -298,28 +188,3 @@ func (e *endpointSlicePortAdaptorV1) protocol() *string { func (e *endpointSlicePortAdaptorV1) appProtocol() *string { return e.endpointPort.AppProtocol } - -type endpointSlicePortAdaptorV1beta1 struct { - endpointPort v1beta1.EndpointPort -} - -func newEndpointSlicePortAdaptorFromV1beta1(port v1beta1.EndpointPort) endpointSlicePortAdaptor { - return &endpointSlicePortAdaptorV1beta1{endpointPort: port} -} - -func (e *endpointSlicePortAdaptorV1beta1) name() *string { - return e.endpointPort.Name -} - -func (e *endpointSlicePortAdaptorV1beta1) port() *int32 { - return e.endpointPort.Port -} - -func (e *endpointSlicePortAdaptorV1beta1) protocol() *string { - val := string(*e.endpointPort.Protocol) - return &val -} - -func (e *endpointSlicePortAdaptorV1beta1) appProtocol() *string { - return e.endpointPort.AppProtocol -} diff --git a/discovery/kubernetes/endpointslice_adaptor_test.go b/discovery/kubernetes/endpointslice_adaptor_test.go index 1ee33371938..de33c64b661 100644 --- a/discovery/kubernetes/endpointslice_adaptor_test.go +++ b/discovery/kubernetes/endpointslice_adaptor_test.go @@ -18,7 +18,6 @@ import ( "github.com/stretchr/testify/require" v1 "k8s.io/api/discovery/v1" - "k8s.io/api/discovery/v1beta1" ) func Test_EndpointSliceAdaptor_v1(t *testing.T) { @@ -48,31 +47,3 @@ func Test_EndpointSliceAdaptor_v1(t *testing.T) { require.Equal(t, endpointSlice.Ports[i].AppProtocol, portAdaptor.appProtocol()) } } - -func Test_EndpointSliceAdaptor_v1beta1(t *testing.T) { - endpointSlice := makeEndpointSliceV1beta1() - adaptor := newEndpointSliceAdaptorFromV1beta1(endpointSlice) - - require.Equal(t, endpointSlice.ObjectMeta.Name, adaptor.name()) - require.Equal(t, endpointSlice.ObjectMeta.Namespace, adaptor.namespace()) - require.Equal(t, endpointSlice.AddressType, v1beta1.AddressType(adaptor.addressType())) - require.Equal(t, endpointSlice.Labels, adaptor.labels()) - require.Equal(t, "testendpoints", endpointSlice.Labels[v1beta1.LabelServiceName]) - - for i, endpointAdaptor := range adaptor.endpoints() { - require.Equal(t, endpointSlice.Endpoints[i].Addresses, endpointAdaptor.addresses()) - require.Equal(t, endpointSlice.Endpoints[i].Hostname, endpointAdaptor.hostname()) - require.Equal(t, 
endpointSlice.Endpoints[i].Conditions.Ready, endpointAdaptor.conditions().ready()) - require.Equal(t, endpointSlice.Endpoints[i].Conditions.Serving, endpointAdaptor.conditions().serving()) - require.Equal(t, endpointSlice.Endpoints[i].Conditions.Terminating, endpointAdaptor.conditions().terminating()) - require.Equal(t, endpointSlice.Endpoints[i].TargetRef, endpointAdaptor.targetRef()) - require.Equal(t, endpointSlice.Endpoints[i].Topology, endpointAdaptor.topology()) - } - - for i, portAdaptor := range adaptor.ports() { - require.Equal(t, endpointSlice.Ports[i].Name, portAdaptor.name()) - require.Equal(t, endpointSlice.Ports[i].Port, portAdaptor.port()) - require.EqualValues(t, endpointSlice.Ports[i].Protocol, portAdaptor.protocol()) - require.Equal(t, endpointSlice.Ports[i].AppProtocol, portAdaptor.appProtocol()) - } -} diff --git a/discovery/kubernetes/endpointslice_test.go b/discovery/kubernetes/endpointslice_test.go index 6ef83081be2..cc92c7ddaa1 100644 --- a/discovery/kubernetes/endpointslice_test.go +++ b/discovery/kubernetes/endpointslice_test.go @@ -21,7 +21,6 @@ import ( "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/discovery/v1" - "k8s.io/api/discovery/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -114,62 +113,8 @@ func makeEndpointSliceV1() *v1.EndpointSlice { } } -func makeEndpointSliceV1beta1() *v1beta1.EndpointSlice { - return &v1beta1.EndpointSlice{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testendpoints", - Namespace: "default", - Labels: map[string]string{ - v1beta1.LabelServiceName: "testendpoints", - }, - Annotations: map[string]string{ - "test.annotation": "test", - }, - }, - AddressType: v1beta1.AddressTypeIPv4, - Ports: []v1beta1.EndpointPort{ - { - Name: strptr("testport"), - Port: int32ptr(9000), - Protocol: protocolptr(corev1.ProtocolTCP), - }, - }, - Endpoints: []v1beta1.Endpoint{ - { - Addresses: []string{"1.2.3.4"}, - Hostname: strptr("testendpoint1"), - }, { - Addresses: []string{"2.3.4.5"}, - Conditions: v1beta1.EndpointConditions{ - Ready: boolptr(true), - Serving: boolptr(true), - Terminating: boolptr(false), - }, - }, { - Addresses: []string{"3.4.5.6"}, - Conditions: v1beta1.EndpointConditions{ - Ready: boolptr(false), - Serving: boolptr(true), - Terminating: boolptr(true), - }, - }, { - Addresses: []string{"4.5.6.7"}, - Conditions: v1beta1.EndpointConditions{ - Ready: boolptr(true), - Serving: boolptr(true), - Terminating: boolptr(false), - }, - TargetRef: &corev1.ObjectReference{ - Kind: "Node", - Name: "barbaz", - }, - }, - }, - } -} - func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.25.0") + n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}) k8sDiscoveryTest{ discovery: n, @@ -249,71 +194,6 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) { }.Run(t) } -func TestEndpointSliceDiscoveryBeforeRunV1beta1(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "1.20.0") - - k8sDiscoveryTest{ - discovery: n, - beforeRun: func() { - obj := makeEndpointSliceV1beta1() - c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) - }, - expectedMaxItems: 1, - expectedRes: map[string]*targetgroup.Group{ - "endpointslice/default/testendpoints": { - Targets: []model.LabelSet{ - { - 
"__address__": "1.2.3.4:9000", - "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "4.5.6.7:9000", - "__meta_kubernetes_endpointslice_address_target_kind": "Node", - "__meta_kubernetes_endpointslice_address_target_name": "barbaz", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - }, - Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_namespace": "default", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", - "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", - "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", - "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", - }, - Source: "endpointslice/default/testendpoints", - }, - }, - }.Run(t) -} - func TestEndpointSliceDiscoveryAdd(t *testing.T) { obj := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -353,25 +233,25 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) { PodIP: "1.2.3.4", }, } - n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.20.0", obj) + n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, obj) k8sDiscoveryTest{ discovery: n, afterStart: func() { - obj := &v1beta1.EndpointSlice{ + obj := &v1.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", Namespace: "default", }, - AddressType: v1beta1.AddressTypeIPv4, - Ports: []v1beta1.EndpointPort{ + AddressType: v1.AddressTypeIPv4, + Ports: []v1.EndpointPort{ { Name: strptr("testport"), Port: int32ptr(9000), Protocol: protocolptr(corev1.ProtocolTCP), }, }, - Endpoints: []v1beta1.Endpoint{ + Endpoints: []v1.Endpoint{ { Addresses: []string{"4.3.2.1"}, TargetRef: &corev1.ObjectReference{ @@ -379,13 +259,13 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) { Name: "testpod", Namespace: "default", }, - Conditions: v1beta1.EndpointConditions{ + Conditions: v1.EndpointConditions{ Ready: boolptr(false), }, }, }, } - 
c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) + c.DiscoveryV1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) }, expectedMaxItems: 1, expectedRes: map[string]*targetgroup.Group{ @@ -411,6 +291,7 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) { "__meta_kubernetes_pod_phase": "", "__meta_kubernetes_pod_ready": "unknown", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, { "__address__": "1.2.3.4:9001", @@ -426,6 +307,7 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) { "__meta_kubernetes_pod_phase": "", "__meta_kubernetes_pod_ready": "unknown", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ @@ -440,118 +322,34 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) { } func TestEndpointSliceDiscoveryDelete(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1()) + n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1()) k8sDiscoveryTest{ discovery: n, afterStart: func() { obj := makeEndpointSliceV1() - c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Delete(context.Background(), obj.Name, metav1.DeleteOptions{}) + c.DiscoveryV1().EndpointSlices(obj.Namespace).Delete(context.Background(), obj.Name, metav1.DeleteOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "endpointslice/default/testendpoints": { Source: "endpointslice/default/testendpoints", - Targets: []model.LabelSet{ - { - "__address__": "1.2.3.4:9000", - "__meta_kubernetes_endpointslice_address_target_kind": "", - "__meta_kubernetes_endpointslice_address_target_name": "", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", - "__meta_kubernetes_endpointslice_endpoint_node_name": "foobar", - "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", - "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", - "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", - "__meta_kubernetes_endpointslice_endpoint_zone": 
"us-east-1c", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "4.5.6.7:9000", - "__meta_kubernetes_endpointslice_address_target_kind": "Node", - "__meta_kubernetes_endpointslice_address_target_name": "barbaz", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - }, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", - "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", - "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", - "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", - "__meta_kubernetes_namespace": "default", - }, }, }, }.Run(t) } func TestEndpointSliceDiscoveryUpdate(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1()) + n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1()) k8sDiscoveryTest{ discovery: n, afterStart: func() { - obj := &v1beta1.EndpointSlice{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testendpoints", - Namespace: "default", - }, - AddressType: v1beta1.AddressTypeIPv4, - Ports: []v1beta1.EndpointPort{ - { - Name: strptr("testport"), - Port: int32ptr(9000), - Protocol: protocolptr(corev1.ProtocolTCP), - }, - }, - Endpoints: []v1beta1.Endpoint{ - { - Addresses: []string{"1.2.3.4"}, - Hostname: strptr("testendpoint1"), - }, { - Addresses: []string{"2.3.4.5"}, - Conditions: v1beta1.EndpointConditions{ - Ready: boolptr(true), - }, - }, - }, - } - c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{}) + obj := makeEndpointSliceV1() + obj.ObjectMeta.Labels = nil + obj.ObjectMeta.Annotations = nil + obj.Endpoints = obj.Endpoints[0:2] + c.DiscoveryV1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ @@ -586,39 +384,11 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) { "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, - { - "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", - "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": 
"TCP", - }, - { - "__address__": "4.5.6.7:9000", - "__meta_kubernetes_endpointslice_address_target_kind": "Node", - "__meta_kubernetes_endpointslice_address_target_name": "barbaz", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, }, Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", - "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", - "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", - "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", - "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_namespace": "default", }, }, }, @@ -626,85 +396,18 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) { } func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1()) + n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1()) k8sDiscoveryTest{ discovery: n, afterStart: func() { - obj := &v1beta1.EndpointSlice{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testendpoints", - Namespace: "default", - }, - AddressType: v1beta1.AddressTypeIPv4, - Ports: []v1beta1.EndpointPort{ - { - Name: strptr("testport"), - Port: int32ptr(9000), - Protocol: protocolptr(corev1.ProtocolTCP), - }, - }, - Endpoints: []v1beta1.Endpoint{}, - } - c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{}) + obj := makeEndpointSliceV1() + obj.Endpoints = []v1.Endpoint{} + c.DiscoveryV1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "endpointslice/default/testendpoints": { - Targets: []model.LabelSet{ - { - "__address__": "1.2.3.4:9000", - "__meta_kubernetes_endpointslice_address_target_kind": "", - "__meta_kubernetes_endpointslice_address_target_name": "", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", - "__meta_kubernetes_endpointslice_endpoint_node_name": "foobar", - "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", - "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", - "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": 
"TCP", - }, - { - "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", - "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "4.5.6.7:9000", - "__meta_kubernetes_endpointslice_address_target_kind": "Node", - "__meta_kubernetes_endpointslice_address_target_name": "barbaz", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - }, Labels: model.LabelSet{ "__meta_kubernetes_endpointslice_address_type": "IPv4", "__meta_kubernetes_endpointslice_name": "testendpoints", @@ -721,7 +424,7 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) { } func TestEndpointSliceDiscoveryWithService(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1()) + n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1()) k8sDiscoveryTest{ discovery: n, @@ -813,7 +516,7 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) { } func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1()) + n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1()) k8sDiscoveryTest{ discovery: n, @@ -1285,6 +988,7 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) { "__meta_kubernetes_pod_phase": "", "__meta_kubernetes_pod_ready": "unknown", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ @@ -1498,3 +1202,165 @@ func TestEndpointSliceInfIndexersCount(t *testing.T) { }) } } + +func TestEndpointSliceDiscoverySidecarContainer(t *testing.T) { + objs := []runtime.Object{ + &v1.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testsidecar", + Namespace: "default", + }, + AddressType: v1.AddressTypeIPv4, + Ports: []v1.EndpointPort{ + { + Name: strptr("testport"), + Port: int32ptr(9000), + Protocol: protocolptr(corev1.ProtocolTCP), + }, + { 
+ Name: strptr("initport"), + Port: int32ptr(9111), + Protocol: protocolptr(corev1.ProtocolTCP), + }, + }, + Endpoints: []v1.Endpoint{ + { + Addresses: []string{"4.3.2.1"}, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "testpod", + Namespace: "default", + }, + }, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + Namespace: "default", + UID: types.UID("deadbeef"), + }, + Spec: corev1.PodSpec{ + NodeName: "testnode", + InitContainers: []corev1.Container{ + { + Name: "ic1", + Image: "ic1:latest", + Ports: []corev1.ContainerPort{ + { + Name: "initport", + ContainerPort: 1111, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + { + Name: "ic2", + Image: "ic2:latest", + Ports: []corev1.ContainerPort{ + { + Name: "initport", + ContainerPort: 9111, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Name: "c1", + Image: "c1:latest", + Ports: []corev1.ContainerPort{ + { + Name: "mainport", + ContainerPort: 9000, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + }, + Status: corev1.PodStatus{ + HostIP: "2.3.4.5", + PodIP: "4.3.2.1", + }, + }, + } + + n, _ := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{}, objs...) + + k8sDiscoveryTest{ + discovery: n, + expectedMaxItems: 1, + expectedRes: map[string]*targetgroup.Group{ + "endpointslice/default/testsidecar": { + Targets: []model.LabelSet{ + { + "__address__": "4.3.2.1:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Pod", + "__meta_kubernetes_endpointslice_address_target_name": "testpod", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_pod_container_image": "c1:latest", + "__meta_kubernetes_pod_container_name": "c1", + "__meta_kubernetes_pod_container_port_name": "mainport", + "__meta_kubernetes_pod_container_port_number": "9000", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", + }, + { + "__address__": "4.3.2.1:9111", + "__meta_kubernetes_endpointslice_address_target_kind": "Pod", + "__meta_kubernetes_endpointslice_address_target_name": "testpod", + "__meta_kubernetes_endpointslice_port": "9111", + "__meta_kubernetes_endpointslice_port_name": "initport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_pod_container_image": "ic2:latest", + "__meta_kubernetes_pod_container_name": "ic2", + "__meta_kubernetes_pod_container_port_name": "initport", + "__meta_kubernetes_pod_container_port_number": "9111", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "true", + }, + { + "__address__": "4.3.2.1:1111", + "__meta_kubernetes_pod_container_image": "ic1:latest", + "__meta_kubernetes_pod_container_name": "ic1", + "__meta_kubernetes_pod_container_port_name": "initport", + 
"__meta_kubernetes_pod_container_port_number": "1111", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "true", + }, + }, + Labels: model.LabelSet{ + "__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_endpointslice_name": "testsidecar", + "__meta_kubernetes_namespace": "default", + }, + Source: "endpointslice/default/testsidecar", + }, + }, + }.Run(t) +} diff --git a/discovery/kubernetes/ingress.go b/discovery/kubernetes/ingress.go index 7b6366b257b..1b7847c5c46 100644 --- a/discovery/kubernetes/ingress.go +++ b/discovery/kubernetes/ingress.go @@ -17,14 +17,12 @@ import ( "context" "errors" "fmt" + "log/slog" "strings" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" v1 "k8s.io/api/networking/v1" - "k8s.io/api/networking/v1beta1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -33,14 +31,14 @@ import ( // Ingress implements discovery of Kubernetes ingress. type Ingress struct { - logger log.Logger + logger *slog.Logger informer cache.SharedInformer store cache.Store queue *workqueue.Type } // NewIngress returns a new ingress discovery. -func NewIngress(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Ingress { +func NewIngress(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Ingress { ingressAddCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleAdd) ingressUpdateCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleUpdate) ingressDeleteCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleDelete) @@ -67,7 +65,7 @@ func NewIngress(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.C }, }) if err != nil { - level.Error(l).Log("msg", "Error adding ingresses event handler.", "err", err) + l.Error("Error adding ingresses event handler.", "err", err) } return s } @@ -87,7 +85,7 @@ func (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { if !cache.WaitForCacheSync(ctx.Done(), i.informer.HasSynced) { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(i.logger).Log("msg", "ingress informer unable to sync cache") + i.logger.Error("ingress informer unable to sync cache") } return } @@ -127,10 +125,8 @@ func (i *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) b switch ingress := o.(type) { case *v1.Ingress: ia = newIngressAdaptorFromV1(ingress) - case *v1beta1.Ingress: - ia = newIngressAdaptorFromV1beta1(ingress) default: - level.Error(i.logger).Log("msg", "converting to Ingress object failed", "err", + i.logger.Error("converting to Ingress object failed", "err", fmt.Errorf("received unexpected object: %v", o)) return true } diff --git a/discovery/kubernetes/ingress_adaptor.go b/discovery/kubernetes/ingress_adaptor.go index d1a7b7f2a2f..84281196b4a 100644 --- a/discovery/kubernetes/ingress_adaptor.go +++ b/discovery/kubernetes/ingress_adaptor.go @@ -15,7 +15,6 @@ package kubernetes import ( v1 "k8s.io/api/networking/v1" - "k8s.io/api/networking/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -89,56 +88,3 @@ func (i 
*ingressRuleAdaptorV1) paths() []string { } func (i *ingressRuleAdaptorV1) host() string { return i.rule.Host } - -// Adaptor for networking.k8s.io/v1beta1. -type ingressAdaptorV1Beta1 struct { - ingress *v1beta1.Ingress -} - -func newIngressAdaptorFromV1beta1(ingress *v1beta1.Ingress) ingressAdaptor { - return &ingressAdaptorV1Beta1{ingress: ingress} -} -func (i *ingressAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta { return i.ingress.ObjectMeta } -func (i *ingressAdaptorV1Beta1) name() string { return i.ingress.Name } -func (i *ingressAdaptorV1Beta1) namespace() string { return i.ingress.Namespace } -func (i *ingressAdaptorV1Beta1) labels() map[string]string { return i.ingress.Labels } -func (i *ingressAdaptorV1Beta1) annotations() map[string]string { return i.ingress.Annotations } -func (i *ingressAdaptorV1Beta1) ingressClassName() *string { return i.ingress.Spec.IngressClassName } - -func (i *ingressAdaptorV1Beta1) tlsHosts() []string { - var hosts []string - for _, tls := range i.ingress.Spec.TLS { - hosts = append(hosts, tls.Hosts...) - } - return hosts -} - -func (i *ingressAdaptorV1Beta1) rules() []ingressRuleAdaptor { - var rules []ingressRuleAdaptor - for _, rule := range i.ingress.Spec.Rules { - rules = append(rules, newIngressRuleAdaptorFromV1Beta1(rule)) - } - return rules -} - -type ingressRuleAdaptorV1Beta1 struct { - rule v1beta1.IngressRule -} - -func newIngressRuleAdaptorFromV1Beta1(rule v1beta1.IngressRule) ingressRuleAdaptor { - return &ingressRuleAdaptorV1Beta1{rule: rule} -} - -func (i *ingressRuleAdaptorV1Beta1) paths() []string { - rv := i.rule.IngressRuleValue - if rv.HTTP == nil { - return nil - } - paths := make([]string, len(rv.HTTP.Paths)) - for n, p := range rv.HTTP.Paths { - paths[n] = p.Path - } - return paths -} - -func (i *ingressRuleAdaptorV1Beta1) host() string { return i.rule.Host } diff --git a/discovery/kubernetes/ingress_test.go b/discovery/kubernetes/ingress_test.go index 8e6654c2ccb..9bddfb1e14f 100644 --- a/discovery/kubernetes/ingress_test.go +++ b/discovery/kubernetes/ingress_test.go @@ -20,7 +20,6 @@ import ( "github.com/prometheus/common/model" v1 "k8s.io/api/networking/v1" - "k8s.io/api/networking/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -89,60 +88,6 @@ func makeIngress(tls TLSMode) *v1.Ingress { return ret } -func makeIngressV1beta1(tls TLSMode) *v1beta1.Ingress { - ret := &v1beta1.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testingress", - Namespace: "default", - Labels: map[string]string{"test/label": "testvalue"}, - Annotations: map[string]string{"test/annotation": "testannotationvalue"}, - }, - Spec: v1beta1.IngressSpec{ - IngressClassName: classString("testclass"), - TLS: nil, - Rules: []v1beta1.IngressRule{ - { - Host: "example.com", - IngressRuleValue: v1beta1.IngressRuleValue{ - HTTP: &v1beta1.HTTPIngressRuleValue{ - Paths: []v1beta1.HTTPIngressPath{ - {Path: "/"}, - {Path: "/foo"}, - }, - }, - }, - }, - { - // No backend config, ignored - Host: "nobackend.example.com", - IngressRuleValue: v1beta1.IngressRuleValue{ - HTTP: &v1beta1.HTTPIngressRuleValue{}, - }, - }, - { - Host: "test.example.com", - IngressRuleValue: v1beta1.IngressRuleValue{ - HTTP: &v1beta1.HTTPIngressRuleValue{ - Paths: []v1beta1.HTTPIngressPath{{}}, - }, - }, - }, - }, - }, - } - - switch tls { - case TLSYes: - ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"example.com", "test.example.com"}}} - case TLSMixed: - ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: 
[]string{"example.com"}}} - case TLSWildcard: - ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"*.example.com"}}} - } - - return ret -} - func classString(v string) *string { return &v } @@ -212,20 +157,6 @@ func TestIngressDiscoveryAdd(t *testing.T) { }.Run(t) } -func TestIngressDiscoveryAddV1beta1(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}, "v1.18.0") - - k8sDiscoveryTest{ - discovery: n, - afterStart: func() { - obj := makeIngressV1beta1(TLSNo) - c.NetworkingV1beta1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{}) - }, - expectedMaxItems: 1, - expectedRes: expectedTargetGroups("default", TLSNo), - }.Run(t) -} - func TestIngressDiscoveryAddTLS(t *testing.T) { n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}) @@ -240,20 +171,6 @@ func TestIngressDiscoveryAddTLS(t *testing.T) { }.Run(t) } -func TestIngressDiscoveryAddTLSV1beta1(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}, "v1.18.0") - - k8sDiscoveryTest{ - discovery: n, - afterStart: func() { - obj := makeIngressV1beta1(TLSYes) - c.NetworkingV1beta1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{}) - }, - expectedMaxItems: 1, - expectedRes: expectedTargetGroups("default", TLSYes), - }.Run(t) -} - func TestIngressDiscoveryAddMixed(t *testing.T) { n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}) @@ -268,20 +185,6 @@ func TestIngressDiscoveryAddMixed(t *testing.T) { }.Run(t) } -func TestIngressDiscoveryAddMixedV1beta1(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}, "v1.18.0") - - k8sDiscoveryTest{ - discovery: n, - afterStart: func() { - obj := makeIngressV1beta1(TLSMixed) - c.NetworkingV1beta1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{}) - }, - expectedMaxItems: 1, - expectedRes: expectedTargetGroups("default", TLSMixed), - }.Run(t) -} - func TestIngressDiscoveryNamespaces(t *testing.T) { n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}) @@ -303,27 +206,6 @@ func TestIngressDiscoveryNamespaces(t *testing.T) { }.Run(t) } -func TestIngressDiscoveryNamespacesV1beta1(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}, "v1.18.0") - - expected := expectedTargetGroups("ns1", TLSNo) - for k, v := range expectedTargetGroups("ns2", TLSNo) { - expected[k] = v - } - k8sDiscoveryTest{ - discovery: n, - afterStart: func() { - for _, ns := range []string{"ns1", "ns2"} { - obj := makeIngressV1beta1(TLSNo) - obj.Namespace = ns - c.NetworkingV1beta1().Ingresses(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) - } - }, - expectedMaxItems: 2, - expectedRes: expected, - }.Run(t) -} - func TestIngressDiscoveryOwnNamespace(t *testing.T) { n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{IncludeOwnNamespace: true}) diff --git a/discovery/kubernetes/kubernetes.go b/discovery/kubernetes/kubernetes.go index 7d20732f2ea..64e8886cfdf 100644 --- a/discovery/kubernetes/kubernetes.go +++ b/discovery/kubernetes/kubernetes.go @@ -17,6 +17,7 @@ import ( "context" "errors" "fmt" + "log/slog" "os" "reflect" "strings" @@ -25,23 +26,18 @@ import ( "github.com/prometheus/prometheus/util/strutil" - disv1beta1 "k8s.io/api/discovery/v1beta1" - - "github.com/go-kit/log" - 
"github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" apiv1 "k8s.io/api/core/v1" disv1 "k8s.io/api/discovery/v1" networkv1 "k8s.io/api/networking/v1" - "k8s.io/api/networking/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -264,7 +260,7 @@ type Discovery struct { sync.RWMutex client kubernetes.Interface role Role - logger log.Logger + logger *slog.Logger namespaceDiscovery *NamespaceDiscovery discoverers []discovery.Discoverer selectors roleSelector @@ -289,14 +285,14 @@ func (d *Discovery) getNamespaces() []string { } // New creates a new Kubernetes discovery for the given role. -func New(l log.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) { +func New(l *slog.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) { m, ok := metrics.(*kubernetesMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") } if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } var ( kcfg *rest.Config @@ -328,7 +324,7 @@ func New(l log.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Di ownNamespace = string(ownNamespaceContents) } - level.Info(l).Log("msg", "Using pod service account via in-cluster config") + l.Info("Using pod service account via in-cluster config") default: rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd") if err != nil { @@ -401,55 +397,22 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { switch d.role { case RoleEndpointSlice: - // Check "networking.k8s.io/v1" availability with retries. 
- // If "v1" is not available, use "networking.k8s.io/v1beta1" for backward compatibility - var v1Supported bool - if retryOnError(ctx, 10*time.Second, - func() (err error) { - v1Supported, err = checkDiscoveryV1Supported(d.client) - if err != nil { - level.Error(d.logger).Log("msg", "Failed to check networking.k8s.io/v1 availability", "err", err) - } - return err - }, - ) { - d.Unlock() - return - } - for _, namespace := range namespaces { var informer cache.SharedIndexInformer - if v1Supported { - e := d.client.DiscoveryV1().EndpointSlices(namespace) - elw := &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - options.FieldSelector = d.selectors.endpointslice.field - options.LabelSelector = d.selectors.endpointslice.label - return e.List(ctx, options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - options.FieldSelector = d.selectors.endpointslice.field - options.LabelSelector = d.selectors.endpointslice.label - return e.Watch(ctx, options) - }, - } - informer = d.newEndpointSlicesByNodeInformer(elw, &disv1.EndpointSlice{}) - } else { - e := d.client.DiscoveryV1beta1().EndpointSlices(namespace) - elw := &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - options.FieldSelector = d.selectors.endpointslice.field - options.LabelSelector = d.selectors.endpointslice.label - return e.List(ctx, options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - options.FieldSelector = d.selectors.endpointslice.field - options.LabelSelector = d.selectors.endpointslice.label - return e.Watch(ctx, options) - }, - } - informer = d.newEndpointSlicesByNodeInformer(elw, &disv1beta1.EndpointSlice{}) + e := d.client.DiscoveryV1().EndpointSlices(namespace) + elw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = d.selectors.endpointslice.field + options.LabelSelector = d.selectors.endpointslice.label + return e.List(ctx, options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = d.selectors.endpointslice.field + options.LabelSelector = d.selectors.endpointslice.label + return e.Watch(ctx, options) + }, } + informer = d.newEndpointSlicesByNodeInformer(elw, &disv1.EndpointSlice{}) s := d.client.CoreV1().Services(namespace) slw := &cache.ListWatch{ @@ -483,7 +446,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { go nodeInf.Run(ctx.Done()) } eps := NewEndpointSlice( - log.With(d.logger, "role", "endpointslice"), + d.logger.With("role", "endpointslice"), informer, d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled), @@ -543,7 +506,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } eps := NewEndpoints( - log.With(d.logger, "role", "endpoint"), + d.logger.With("role", "endpoint"), d.newEndpointsByNodeInformer(elw), d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled), @@ -577,7 +540,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { }, } pod := NewPod( - log.With(d.logger, "role", "pod"), + d.logger.With("role", "pod"), d.newPodsByNodeInformer(plw), nodeInformer, d.metrics.eventCount, @@ -601,7 +564,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { }, } svc := NewService( - log.With(d.logger, 
"role", "service"), + d.logger.With("role", "service"), d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), d.metrics.eventCount, ) @@ -609,57 +572,24 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { go svc.informer.Run(ctx.Done()) } case RoleIngress: - // Check "networking.k8s.io/v1" availability with retries. - // If "v1" is not available, use "networking.k8s.io/v1beta1" for backward compatibility - var v1Supported bool - if retryOnError(ctx, 10*time.Second, - func() (err error) { - v1Supported, err = checkNetworkingV1Supported(d.client) - if err != nil { - level.Error(d.logger).Log("msg", "Failed to check networking.k8s.io/v1 availability", "err", err) - } - return err - }, - ) { - d.Unlock() - return - } - for _, namespace := range namespaces { var informer cache.SharedInformer - if v1Supported { - i := d.client.NetworkingV1().Ingresses(namespace) - ilw := &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - options.FieldSelector = d.selectors.ingress.field - options.LabelSelector = d.selectors.ingress.label - return i.List(ctx, options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - options.FieldSelector = d.selectors.ingress.field - options.LabelSelector = d.selectors.ingress.label - return i.Watch(ctx, options) - }, - } - informer = d.mustNewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled) - } else { - i := d.client.NetworkingV1beta1().Ingresses(namespace) - ilw := &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - options.FieldSelector = d.selectors.ingress.field - options.LabelSelector = d.selectors.ingress.label - return i.List(ctx, options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - options.FieldSelector = d.selectors.ingress.field - options.LabelSelector = d.selectors.ingress.label - return i.Watch(ctx, options) - }, - } - informer = d.mustNewSharedInformer(ilw, &v1beta1.Ingress{}, resyncDisabled) + i := d.client.NetworkingV1().Ingresses(namespace) + ilw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = d.selectors.ingress.field + options.LabelSelector = d.selectors.ingress.label + return i.List(ctx, options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = d.selectors.ingress.field + options.LabelSelector = d.selectors.ingress.label + return i.Watch(ctx, options) + }, } + informer = d.mustNewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled) ingress := NewIngress( - log.With(d.logger, "role", "ingress"), + d.logger.With("role", "ingress"), informer, d.metrics.eventCount, ) @@ -668,11 +598,11 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } case RoleNode: nodeInformer := d.newNodeInformer(ctx) - node := NewNode(log.With(d.logger, "role", "node"), nodeInformer, d.metrics.eventCount) + node := NewNode(d.logger.With("role", "node"), nodeInformer, d.metrics.eventCount) d.discoverers = append(d.discoverers, node) go node.informer.Run(ctx.Done()) default: - level.Error(d.logger).Log("msg", "unknown Kubernetes discovery kind", "role", d.role) + d.logger.Error("unknown Kubernetes discovery kind", "role", d.role) } var wg sync.WaitGroup @@ -720,20 +650,6 @@ func retryOnError(ctx context.Context, interval time.Duration, f func() error) ( } } -func checkNetworkingV1Supported(client kubernetes.Interface) (bool, error) { - k8sVer, 
err := client.Discovery().ServerVersion() - if err != nil { - return false, err - } - semVer, err := utilversion.ParseSemantic(k8sVer.String()) - if err != nil { - return false, err - } - // networking.k8s.io/v1 is available since Kubernetes v1.19 - // https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.19.md - return semVer.Major() >= 1 && semVer.Minor() >= 19, nil -} - func (d *Discovery) newNodeInformer(ctx context.Context) cache.SharedInformer { nlw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { @@ -834,19 +750,6 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object } } } - case *disv1beta1.EndpointSlice: - for _, target := range e.Endpoints { - if target.TargetRef != nil { - switch target.TargetRef.Kind { - case "Pod": - if target.NodeName != nil { - nodes = append(nodes, *target.NodeName) - } - case "Node": - nodes = append(nodes, target.TargetRef.Name) - } - } - } default: return nil, fmt.Errorf("object is not an endpointslice") } @@ -882,21 +785,6 @@ func (d *Discovery) mustNewSharedIndexInformer(lw cache.ListerWatcher, exampleOb return informer } -func checkDiscoveryV1Supported(client kubernetes.Interface) (bool, error) { - k8sVer, err := client.Discovery().ServerVersion() - if err != nil { - return false, err - } - semVer, err := utilversion.ParseSemantic(k8sVer.String()) - if err != nil { - return false, err - } - // The discovery.k8s.io/v1beta1 API version of EndpointSlice will no longer be served in v1.25. - // discovery.k8s.io/v1 is available since Kubernetes v1.21 - // https://kubernetes.io/docs/reference/using-api/deprecation-guide/#v1-25 - return semVer.Major() >= 1 && semVer.Minor() >= 21, nil -} - func addObjectMetaLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta, role Role) { labelSet[model.LabelName(metaLabelPrefix+string(role)+"_name")] = lv(objectMeta.Name) diff --git a/discovery/kubernetes/kubernetes_test.go b/discovery/kubernetes/kubernetes_test.go index b905a7a7bf0..a14f2b3d1b3 100644 --- a/discovery/kubernetes/kubernetes_test.go +++ b/discovery/kubernetes/kubernetes_test.go @@ -20,8 +20,8 @@ import ( "testing" "time" - "github.com/go-kit/log" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" apiv1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -48,7 +48,7 @@ func TestMain(m *testing.M) { // makeDiscovery creates a kubernetes.Discovery instance for testing. func makeDiscovery(role Role, nsDiscovery NamespaceDiscovery, objects ...runtime.Object) (*Discovery, kubernetes.Interface) { - return makeDiscoveryWithVersion(role, nsDiscovery, "v1.22.0", objects...) + return makeDiscoveryWithVersion(role, nsDiscovery, "v1.25.0", objects...) } // makeDiscoveryWithVersion creates a kubernetes.Discovery instance with the specified kubernetes version for testing. 
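Aside (not part of the patch): the hunks above and below migrate discovery from go-kit/log (log.With, level.Error(...).Log("msg", ...)) to the standard library's log/slog, with github.com/prometheus/common/promslog supplying the nop logger. A minimal, hypothetical sketch of that pattern; syncCaches is a made-up name and not a function in this diff:

package main

import (
	"errors"
	"log/slog"

	"github.com/prometheus/common/promslog"
)

// syncCaches mirrors how the constructors in this diff fall back to a nop
// logger, attach a "role" field, and emit structured errors.
func syncCaches(logger *slog.Logger) {
	if logger == nil {
		logger = promslog.NewNopLogger() // same nil-logger fallback used by NewNode above
	}
	l := logger.With("role", "endpointslice") // replaces log.With(d.logger, "role", "endpointslice")
	err := errors.New("context canceled")     // stand-in error for the sketch
	// replaces level.Error(l).Log("msg", "...", "err", err)
	l.Error("endpointslice informer unable to sync cache", "err", err)
}

func main() {
	syncCaches(nil) // nop logger: emits nothing, but exercises the code path
}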
@@ -73,7 +73,7 @@ func makeDiscoveryWithVersion(role Role, nsDiscovery NamespaceDiscovery, k8sVer d := &Discovery{ client: clientset, - logger: log.NewNopLogger(), + logger: promslog.NewNopLogger(), role: role, namespaceDiscovery: &nsDiscovery, ownNamespace: "own-ns", @@ -287,40 +287,6 @@ func TestRetryOnError(t *testing.T) { } } -func TestCheckNetworkingV1Supported(t *testing.T) { - tests := []struct { - version string - wantSupported bool - wantErr bool - }{ - {version: "v1.18.0", wantSupported: false, wantErr: false}, - {version: "v1.18.1", wantSupported: false, wantErr: false}, - // networking v1 is supported since Kubernetes v1.19 - {version: "v1.19.0", wantSupported: true, wantErr: false}, - {version: "v1.20.0-beta.2", wantSupported: true, wantErr: false}, - // error patterns - {version: "", wantSupported: false, wantErr: true}, - {version: "<>", wantSupported: false, wantErr: true}, - } - - for _, tc := range tests { - tc := tc - t.Run(tc.version, func(t *testing.T) { - clientset := fake.NewSimpleClientset() - fakeDiscovery, _ := clientset.Discovery().(*fakediscovery.FakeDiscovery) - fakeDiscovery.FakedServerVersion = &version.Info{GitVersion: tc.version} - supported, err := checkNetworkingV1Supported(clientset) - - if tc.wantErr { - require.Error(t, err) - } else { - require.NoError(t, err) - } - require.Equal(t, tc.wantSupported, supported) - }) - } -} - func TestFailuresCountMetric(t *testing.T) { tests := []struct { role Role diff --git a/discovery/kubernetes/node.go b/discovery/kubernetes/node.go index 35923ae1998..0e0c5745f24 100644 --- a/discovery/kubernetes/node.go +++ b/discovery/kubernetes/node.go @@ -17,13 +17,13 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -38,16 +38,16 @@ const ( // Node discovers Kubernetes nodes. type Node struct { - logger log.Logger + logger *slog.Logger informer cache.SharedInformer store cache.Store queue *workqueue.Type } // NewNode returns a new node discovery. 
-func NewNode(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Node { +func NewNode(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Node { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } nodeAddCount := eventCount.WithLabelValues(RoleNode.String(), MetricLabelRoleAdd) @@ -76,7 +76,7 @@ func NewNode(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.Coun }, }) if err != nil { - level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err) + l.Error("Error adding nodes event handler.", "err", err) } return n } @@ -96,7 +96,7 @@ func (n *Node) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { if !cache.WaitForCacheSync(ctx.Done(), n.informer.HasSynced) { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(n.logger).Log("msg", "node informer unable to sync cache") + n.logger.Error("node informer unable to sync cache") } return } @@ -133,7 +133,7 @@ func (n *Node) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool } node, err := convertToNode(o) if err != nil { - level.Error(n.logger).Log("msg", "converting to Node object failed", "err", err) + n.logger.Error("converting to Node object failed", "err", err) return true } send(ctx, ch, n.buildNode(node)) @@ -181,7 +181,7 @@ func (n *Node) buildNode(node *apiv1.Node) *targetgroup.Group { addr, addrMap, err := nodeAddress(node) if err != nil { - level.Warn(n.logger).Log("msg", "No node address found", "err", err) + n.logger.Warn("No node address found", "err", err) return nil } addr = net.JoinHostPort(addr, strconv.FormatInt(int64(node.Status.DaemonEndpoints.KubeletEndpoint.Port), 10)) diff --git a/discovery/kubernetes/pod.go b/discovery/kubernetes/pod.go index a3d12f97a81..8704a662395 100644 --- a/discovery/kubernetes/pod.go +++ b/discovery/kubernetes/pod.go @@ -17,14 +17,14 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" "strings" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/cache" @@ -44,14 +44,14 @@ type Pod struct { nodeInf cache.SharedInformer withNodeMetadata bool store cache.Store - logger log.Logger + logger *slog.Logger queue *workqueue.Type } // NewPod creates a new pod discovery. 
-func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInformer, eventCount *prometheus.CounterVec) *Pod { +func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInformer, eventCount *prometheus.CounterVec) *Pod { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } podAddCount := eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleAdd) @@ -81,7 +81,7 @@ func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInfo }, }) if err != nil { - level.Error(l).Log("msg", "Error adding pods event handler.", "err", err) + l.Error("Error adding pods event handler.", "err", err) } if p.withNodeMetadata { @@ -97,13 +97,13 @@ func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInfo DeleteFunc: func(o interface{}) { nodeName, err := nodeName(o) if err != nil { - level.Error(l).Log("msg", "Error getting Node name", "err", err) + l.Error("Error getting Node name", "err", err) } p.enqueuePodsForNode(nodeName) }, }) if err != nil { - level.Error(l).Log("msg", "Error adding pods event handler.", "err", err) + l.Error("Error adding pods event handler.", "err", err) } } @@ -130,7 +130,7 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(p.logger).Log("msg", "pod informer unable to sync cache") + p.logger.Error("pod informer unable to sync cache") } return } @@ -167,7 +167,7 @@ func (p *Pod) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool } pod, err := convertToPod(o) if err != nil { - level.Error(p.logger).Log("msg", "converting to Pod object failed", "err", err) + p.logger.Error("converting to Pod object failed", "err", err) return true } send(ctx, ch, p.buildPod(pod)) @@ -249,7 +249,7 @@ func (p *Pod) findPodContainerStatus(statuses *[]apiv1.ContainerStatus, containe func (p *Pod) findPodContainerID(statuses *[]apiv1.ContainerStatus, containerName string) string { cStatus, err := p.findPodContainerStatus(statuses, containerName) if err != nil { - level.Debug(p.logger).Log("msg", "cannot find container ID", "err", err) + p.logger.Debug("cannot find container ID", "err", err) return "" } return cStatus.ContainerID @@ -318,7 +318,7 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group { func (p *Pod) enqueuePodsForNode(nodeName string) { pods, err := p.podInf.GetIndexer().ByIndex(nodeIndex, nodeName) if err != nil { - level.Error(p.logger).Log("msg", "Error getting pods for node", "node", nodeName, "err", err) + p.logger.Error("Error getting pods for node", "node", nodeName, "err", err) return } diff --git a/discovery/kubernetes/service.go b/discovery/kubernetes/service.go index 51204a5a1af..e666497c864 100644 --- a/discovery/kubernetes/service.go +++ b/discovery/kubernetes/service.go @@ -17,13 +17,13 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -33,16 +33,16 @@ import ( // Service implements discovery of Kubernetes services. type Service struct { - logger log.Logger + logger *slog.Logger informer cache.SharedInformer store cache.Store queue *workqueue.Type } // NewService returns a new service discovery. 
-func NewService(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Service { +func NewService(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Service { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } svcAddCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleAdd) @@ -71,7 +71,7 @@ func NewService(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.C }, }) if err != nil { - level.Error(l).Log("msg", "Error adding services event handler.", "err", err) + l.Error("Error adding services event handler.", "err", err) } return s } @@ -91,7 +91,7 @@ func (s *Service) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { if !cache.WaitForCacheSync(ctx.Done(), s.informer.HasSynced) { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(s.logger).Log("msg", "service informer unable to sync cache") + s.logger.Error("service informer unable to sync cache") } return } @@ -128,7 +128,7 @@ func (s *Service) process(ctx context.Context, ch chan<- []*targetgroup.Group) b } eps, err := convertToService(o) if err != nil { - level.Error(s.logger).Log("msg", "converting to Service object failed", "err", err) + s.logger.Error("converting to Service object failed", "err", err) return true } send(ctx, ch, s.buildService(eps)) diff --git a/discovery/legacymanager/manager.go b/discovery/legacymanager/manager.go deleted file mode 100644 index 6fc61485d11..00000000000 --- a/discovery/legacymanager/manager.go +++ /dev/null @@ -1,332 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package legacymanager - -import ( - "context" - "fmt" - "reflect" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/client_golang/prometheus" - - "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/discovery/targetgroup" -) - -type poolKey struct { - setName string - provider string -} - -// provider holds a Discoverer instance, its configuration and its subscribers. -type provider struct { - name string - d discovery.Discoverer - subs []string - config interface{} -} - -// NewManager is the Discovery Manager constructor. -func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, sdMetrics map[string]discovery.DiscovererMetrics, options ...func(*Manager)) *Manager { - if logger == nil { - logger = log.NewNopLogger() - } - mgr := &Manager{ - logger: logger, - syncCh: make(chan map[string][]*targetgroup.Group), - targets: make(map[poolKey]map[string]*targetgroup.Group), - discoverCancel: []context.CancelFunc{}, - ctx: ctx, - updatert: 5 * time.Second, - triggerSend: make(chan struct{}, 1), - registerer: registerer, - sdMetrics: sdMetrics, - } - for _, option := range options { - option(mgr) - } - - // Register the metrics. - // We have to do this after setting all options, so that the name of the Manager is set. 
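Across node.go, pod.go, and service.go (and the rest of this patch) the go-kit logger is replaced by the standard library's log/slog, with github.com/prometheus/common/promslog supplying the no-op logger that the constructors fall back to when nil is passed. A minimal sketch of the idiom change, illustration only and not part of the patch (the handler choice and messages are arbitrary):

package main

import (
	"errors"
	"log/slog"
	"os"

	"github.com/prometheus/common/promslog"
)

func main() {
	// Any *slog.Logger will do; here a plain text handler writing to stderr.
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

	// go-kit:  level.Error(l).Log("msg", "converting to Node object failed", "err", err)
	// slog:
	err := errors.New("example error")
	logger.Error("converting to Node object failed", "err", err)

	// go-kit:  log.With(l, "role", "node")
	// slog:
	nodeLogger := logger.With("role", "node")
	nodeLogger.Warn("No node address found", "err", err)

	// go-kit:  log.NewNopLogger()
	// promslog equivalent, used as the nil-logger fallback throughout the patch:
	nop := promslog.NewNopLogger()
	nop.Debug("this goes nowhere")
}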
- if metrics, err := discovery.NewManagerMetrics(registerer, mgr.name); err == nil { - mgr.metrics = metrics - } else { - level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err) - return nil - } - - return mgr -} - -// Name sets the name of the manager. -func Name(n string) func(*Manager) { - return func(m *Manager) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.name = n - } -} - -// Manager maintains a set of discovery providers and sends each update to a map channel. -// Targets are grouped by the target set name. -type Manager struct { - logger log.Logger - name string - mtx sync.RWMutex - ctx context.Context - discoverCancel []context.CancelFunc - - // Some Discoverers(eg. k8s) send only the updates for a given target group - // so we use map[tg.Source]*targetgroup.Group to know which group to update. - targets map[poolKey]map[string]*targetgroup.Group - // providers keeps track of SD providers. - providers []*provider - // The sync channel sends the updates as a map where the key is the job value from the scrape config. - syncCh chan map[string][]*targetgroup.Group - - // How long to wait before sending updates to the channel. The variable - // should only be modified in unit tests. - updatert time.Duration - - // The triggerSend channel signals to the manager that new updates have been received from providers. - triggerSend chan struct{} - - // A registerer for all service discovery metrics. - registerer prometheus.Registerer - - metrics *discovery.Metrics - sdMetrics map[string]discovery.DiscovererMetrics -} - -// Run starts the background processing. -func (m *Manager) Run() error { - go m.sender() - <-m.ctx.Done() - m.cancelDiscoverers() - return m.ctx.Err() -} - -// SyncCh returns a read only channel used by all the clients to receive target updates. -func (m *Manager) SyncCh() <-chan map[string][]*targetgroup.Group { - return m.syncCh -} - -// ApplyConfig removes all running discovery providers and starts new ones using the provided config. -func (m *Manager) ApplyConfig(cfg map[string]discovery.Configs) error { - m.mtx.Lock() - defer m.mtx.Unlock() - - for pk := range m.targets { - if _, ok := cfg[pk.setName]; !ok { - m.metrics.DiscoveredTargets.DeleteLabelValues(m.name, pk.setName) - } - } - m.cancelDiscoverers() - m.targets = make(map[poolKey]map[string]*targetgroup.Group) - m.providers = nil - m.discoverCancel = nil - - failedCount := 0 - for name, scfg := range cfg { - failedCount += m.registerProviders(scfg, name) - m.metrics.DiscoveredTargets.WithLabelValues(name).Set(0) - } - m.metrics.FailedConfigs.Set(float64(failedCount)) - - for _, prov := range m.providers { - m.startProvider(m.ctx, prov) - } - - return nil -} - -// StartCustomProvider is used for sdtool. Only use this if you know what you're doing. 
-func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker discovery.Discoverer) { - p := &provider{ - name: name, - d: worker, - subs: []string{name}, - } - m.providers = append(m.providers, p) - m.startProvider(ctx, p) -} - -func (m *Manager) startProvider(ctx context.Context, p *provider) { - level.Debug(m.logger).Log("msg", "Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs)) - ctx, cancel := context.WithCancel(ctx) - updates := make(chan []*targetgroup.Group) - - m.discoverCancel = append(m.discoverCancel, cancel) - - go p.d.Run(ctx, updates) - go m.updater(ctx, p, updates) -} - -func (m *Manager) updater(ctx context.Context, p *provider, updates chan []*targetgroup.Group) { - for { - select { - case <-ctx.Done(): - return - case tgs, ok := <-updates: - m.metrics.ReceivedUpdates.Inc() - if !ok { - level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name) - return - } - - for _, s := range p.subs { - m.updateGroup(poolKey{setName: s, provider: p.name}, tgs) - } - - select { - case m.triggerSend <- struct{}{}: - default: - } - } - } -} - -func (m *Manager) sender() { - ticker := time.NewTicker(m.updatert) - defer ticker.Stop() - - for { - select { - case <-m.ctx.Done(): - return - case <-ticker.C: // Some discoverers send updates too often so we throttle these with the ticker. - select { - case <-m.triggerSend: - m.metrics.SentUpdates.Inc() - select { - case m.syncCh <- m.allGroups(): - default: - m.metrics.DelayedUpdates.Inc() - level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle") - select { - case m.triggerSend <- struct{}{}: - default: - } - } - default: - } - } - } -} - -func (m *Manager) cancelDiscoverers() { - for _, c := range m.discoverCancel { - c() - } -} - -func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) { - m.mtx.Lock() - defer m.mtx.Unlock() - - if _, ok := m.targets[poolKey]; !ok { - m.targets[poolKey] = make(map[string]*targetgroup.Group) - } - for _, tg := range tgs { - if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics. - m.targets[poolKey][tg.Source] = tg - } - } -} - -func (m *Manager) allGroups() map[string][]*targetgroup.Group { - m.mtx.RLock() - defer m.mtx.RUnlock() - - tSets := map[string][]*targetgroup.Group{} - n := map[string]int{} - for pkey, tsets := range m.targets { - for _, tg := range tsets { - // Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager' - // to signal that it needs to stop all scrape loops for this target set. - tSets[pkey.setName] = append(tSets[pkey.setName], tg) - n[pkey.setName] += len(tg.Targets) - } - } - for setName, v := range n { - m.metrics.DiscoveredTargets.WithLabelValues(setName).Set(float64(v)) - } - return tSets -} - -// registerProviders returns a number of failed SD config. 
-func (m *Manager) registerProviders(cfgs discovery.Configs, setName string) int { - var ( - failed int - added bool - ) - add := func(cfg discovery.Config) { - for _, p := range m.providers { - if reflect.DeepEqual(cfg, p.config) { - p.subs = append(p.subs, setName) - added = true - return - } - } - typ := cfg.Name() - d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{ - Logger: log.With(m.logger, "discovery", typ, "config", setName), - Metrics: m.sdMetrics[typ], - }) - if err != nil { - level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName) - failed++ - return - } - m.providers = append(m.providers, &provider{ - name: fmt.Sprintf("%s/%d", typ, len(m.providers)), - d: d, - config: cfg, - subs: []string{setName}, - }) - added = true - } - for _, cfg := range cfgs { - add(cfg) - } - if !added { - // Add an empty target group to force the refresh of the corresponding - // scrape pool and to notify the receiver that this target set has no - // current targets. - // It can happen because the combined set of SD configurations is empty - // or because we fail to instantiate all the SD configurations. - add(discovery.StaticConfig{{}}) - } - return failed -} - -// StaticProvider holds a list of target groups that never change. -type StaticProvider struct { - TargetGroups []*targetgroup.Group -} - -// Run implements the Worker interface. -func (sd *StaticProvider) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { - // We still have to consider that the consumer exits right away in which case - // the context will be canceled. - select { - case ch <- sd.TargetGroups: - case <-ctx.Done(): - } - close(ch) -} diff --git a/discovery/legacymanager/manager_test.go b/discovery/legacymanager/manager_test.go deleted file mode 100644 index f1be9631136..00000000000 --- a/discovery/legacymanager/manager_test.go +++ /dev/null @@ -1,1185 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package legacymanager - -import ( - "context" - "fmt" - "sort" - "strconv" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/prometheus/client_golang/prometheus" - client_testutil "github.com/prometheus/client_golang/prometheus/testutil" - "github.com/prometheus/common/model" - "github.com/stretchr/testify/require" - - "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/util/testutil" -) - -func TestMain(m *testing.M) { - testutil.TolerantVerifyLeak(m) -} - -func newTestMetrics(t *testing.T, reg prometheus.Registerer) (*discovery.RefreshMetricsManager, map[string]discovery.DiscovererMetrics) { - refreshMetrics := discovery.NewRefreshMetrics(reg) - sdMetrics, err := discovery.RegisterSDMetrics(reg, refreshMetrics) - require.NoError(t, err) - return &refreshMetrics, sdMetrics -} - -// TestTargetUpdatesOrder checks that the target updates are received in the expected order. 
-func TestTargetUpdatesOrder(t *testing.T) { - // The order by which the updates are send is determined by the interval passed to the mock discovery adapter - // Final targets array is ordered alphabetically by the name of the discoverer. - // For example discoverer "A" with targets "t2,t3" and discoverer "B" with targets "t1,t2" will result in "t2,t3,t1,t2" after the merge. - testCases := []struct { - title string - updates map[string][]update - expectedTargets [][]*targetgroup.Group - }{ - { - title: "Single TP no updates", - updates: map[string][]update{ - "tp1": {}, - }, - expectedTargets: nil, - }, - { - title: "Multiple TPs no updates", - updates: map[string][]update{ - "tp1": {}, - "tp2": {}, - "tp3": {}, - }, - expectedTargets: nil, - }, - { - title: "Single TP empty initials", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{}, - interval: 5 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - {}, - }, - }, - { - title: "Multiple TPs empty initials", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{}, - interval: 5 * time.Millisecond, - }, - }, - "tp2": { - { - targetGroups: []targetgroup.Group{}, - interval: 200 * time.Millisecond, - }, - }, - "tp3": { - { - targetGroups: []targetgroup.Group{}, - interval: 100 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - {}, - {}, - {}, - }, - }, - { - title: "Single TP initials only", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - }, - { - title: "Multiple TPs initials only", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - }, - "tp2": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - }, - }, - }, - { - title: "Single TP initials followed by empty updates", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 0, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{}, - }, - }, - interval: 10 * time.Millisecond, - }, - }, - }, - 
expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{}, - }, - }, - }, - }, - { - title: "Single TP initials and new groups", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 0, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp1_group3", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp1_group3", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - }, - }, - { - title: "Multiple TPs initials and new groups", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group3", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group4", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - interval: 500 * time.Millisecond, - }, - }, - "tp2": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - }, - interval: 100 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp2_group3", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: "tp2_group4", - Targets: []model.LabelSet{{"__instance__": "8"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - 
Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - { - Source: "tp2_group3", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: "tp2_group4", - Targets: []model.LabelSet{{"__instance__": "8"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - { - Source: "tp1_group3", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group4", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - { - Source: "tp2_group3", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: "tp2_group4", - Targets: []model.LabelSet{{"__instance__": "8"}}, - }, - }, - }, - }, - { - title: "One TP initials arrive after other TP updates.", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - interval: 150 * time.Millisecond, - }, - }, - "tp2": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - }, - interval: 200 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "8"}}, - }, - }, - interval: 100 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "8"}}, - }, - }, - }, - }, - - { - title: "Single TP empty update in between", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 30 * time.Millisecond, - }, - { - 
targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{}, - }, - }, - interval: 10 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - interval: 300 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - }, - }, - } - - for i, tc := range testCases { - tc := tc - t.Run(tc.title, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) - require.NotNil(t, discoveryManager) - discoveryManager.updatert = 100 * time.Millisecond - - var totalUpdatesCount int - for _, up := range tc.updates { - if len(up) > 0 { - totalUpdatesCount += len(up) - } - } - provUpdates := make(chan []*targetgroup.Group, totalUpdatesCount) - - for _, up := range tc.updates { - go newMockDiscoveryProvider(up...).Run(ctx, provUpdates) - } - - for x := 0; x < totalUpdatesCount; x++ { - select { - case <-ctx.Done(): - t.Fatalf("%d: no update arrived within the timeout limit", x) - case tgs := <-provUpdates: - discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs) - for _, got := range discoveryManager.allGroups() { - assertEqualGroups(t, got, tc.expectedTargets[x]) - } - } - } - }) - } -} - -func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group) { - t.Helper() - - // Need to sort by the groups's source as the received order is not guaranteed. 
- sort.Sort(byGroupSource(got)) - sort.Sort(byGroupSource(expected)) - - require.Equal(t, expected, got) -} - -func staticConfig(addrs ...string) discovery.StaticConfig { - var cfg discovery.StaticConfig - for i, addr := range addrs { - cfg = append(cfg, &targetgroup.Group{ - Source: strconv.Itoa(i), - Targets: []model.LabelSet{ - {model.AddressLabel: model.LabelValue(addr)}, - }, - }) - } - return cfg -} - -func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Group, poolKey poolKey, label string, present bool) { - t.Helper() - if _, ok := tSets[poolKey]; !ok { - t.Fatalf("'%s' should be present in Pool keys: %v", poolKey, tSets) - } - - match := false - var mergedTargets string - for _, targetGroup := range tSets[poolKey] { - for _, l := range targetGroup.Targets { - mergedTargets = mergedTargets + " " + l.String() - if l.String() == label { - match = true - } - } - } - if match != present { - msg := "" - if !present { - msg = "not" - } - t.Fatalf("%q should %s be present in Targets labels: %q", label, msg, mergedTargets) - } -} - -func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) - require.NotNil(t, discoveryManager) - discoveryManager.updatert = 100 * time.Millisecond - go discoveryManager.Run() - - c := map[string]discovery.Configs{ - "prometheus": { - staticConfig("foo:9090", "bar:9090"), - }, - } - discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true) - - c["prometheus"] = discovery.Configs{ - staticConfig("foo:9090"), - } - discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", false) -} - -func TestDiscovererConfigs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) - require.NotNil(t, discoveryManager) - discoveryManager.updatert = 100 * time.Millisecond - go discoveryManager.Run() - - c := map[string]discovery.Configs{ - "prometheus": { - staticConfig("foo:9090", "bar:9090"), - staticConfig("baz:9090"), - }, - } - discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/1"}, "{__address__=\"baz:9090\"}", true) -} - -// TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after -// removing all targets from the static_configs sends an update with empty targetGroups. 
-// This is required to signal the receiver that this target set has no current targets. -func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) - require.NotNil(t, discoveryManager) - discoveryManager.updatert = 100 * time.Millisecond - go discoveryManager.Run() - - c := map[string]discovery.Configs{ - "prometheus": { - staticConfig("foo:9090"), - }, - } - discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - - c["prometheus"] = discovery.Configs{ - discovery.StaticConfig{{}}, - } - discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - - pkey := poolKey{setName: "prometheus", provider: "static/0"} - targetGroups, ok := discoveryManager.targets[pkey] - if !ok { - t.Fatalf("'%v' should be present in target groups", pkey) - } - group, ok := targetGroups[""] - if !ok { - t.Fatalf("missing '' key in target groups %v", targetGroups) - } - - if len(group.Targets) != 0 { - t.Fatalf("Invalid number of targets: expected 0, got %d", len(group.Targets)) - } -} - -func TestIdenticalConfigurationsAreCoalesced(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - discoveryManager := NewManager(ctx, nil, reg, sdMetrics) - require.NotNil(t, discoveryManager) - discoveryManager.updatert = 100 * time.Millisecond - go discoveryManager.Run() - - c := map[string]discovery.Configs{ - "prometheus": { - staticConfig("foo:9090"), - }, - "prometheus2": { - staticConfig("foo:9090"), - }, - } - discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus2", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - if len(discoveryManager.providers) != 1 { - t.Fatalf("Invalid number of providers: expected 1, got %d", len(discoveryManager.providers)) - } -} - -func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) { - originalConfig := discovery.Configs{ - staticConfig("foo:9090", "bar:9090", "baz:9090"), - } - processedConfig := discovery.Configs{ - staticConfig("foo:9090", "bar:9090", "baz:9090"), - } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) - require.NotNil(t, discoveryManager) - discoveryManager.updatert = 100 * time.Millisecond - go discoveryManager.Run() - - cfgs := map[string]discovery.Configs{ - "prometheus": processedConfig, - } - discoveryManager.ApplyConfig(cfgs) - <-discoveryManager.SyncCh() - - for _, cfg := range cfgs { - require.Equal(t, originalConfig, cfg) - } -} - -type errorConfig struct{ err error } - -func (e errorConfig) Name() string { return "error" } -func (e errorConfig) NewDiscoverer(discovery.DiscovererOptions) (discovery.Discoverer, error) { - return nil, e.err -} - -// NewDiscovererMetrics implements discovery.Config. 
-func (errorConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { - return &discovery.NoopDiscovererMetrics{} -} - -func TestGaugeFailedConfigs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) - require.NotNil(t, discoveryManager) - discoveryManager.updatert = 100 * time.Millisecond - go discoveryManager.Run() - - c := map[string]discovery.Configs{ - "prometheus": { - errorConfig{fmt.Errorf("tests error 0")}, - errorConfig{fmt.Errorf("tests error 1")}, - errorConfig{fmt.Errorf("tests error 2")}, - }, - } - discoveryManager.ApplyConfig(c) - <-discoveryManager.SyncCh() - - failedCount := client_testutil.ToFloat64(discoveryManager.metrics.FailedConfigs) - if failedCount != 3 { - t.Fatalf("Expected to have 3 failed configs, got: %v", failedCount) - } - - c["prometheus"] = discovery.Configs{ - staticConfig("foo:9090"), - } - discoveryManager.ApplyConfig(c) - <-discoveryManager.SyncCh() - - failedCount = client_testutil.ToFloat64(discoveryManager.metrics.FailedConfigs) - if failedCount != 0 { - t.Fatalf("Expected to get no failed config, got: %v", failedCount) - } -} - -func TestCoordinationWithReceiver(t *testing.T) { - updateDelay := 100 * time.Millisecond - - type expect struct { - delay time.Duration - tgs map[string][]*targetgroup.Group - } - - testCases := []struct { - title string - providers map[string]discovery.Discoverer - expected []expect - }{ - { - title: "Receiver should get all updates even when one provider closes its channel", - providers: map[string]discovery.Discoverer{ - "once1": &onceProvider{ - tgs: []*targetgroup.Group{ - { - Source: "tg1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - }, - "mock1": newMockDiscoveryProvider( - update{ - interval: 2 * updateDelay, - targetGroups: []targetgroup.Group{ - { - Source: "tg2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - ), - }, - expected: []expect{ - { - tgs: map[string][]*targetgroup.Group{ - "once1": { - { - Source: "tg1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - }, - }, - { - tgs: map[string][]*targetgroup.Group{ - "once1": { - { - Source: "tg1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - "mock1": { - { - Source: "tg2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - }, - }, - }, - { - title: "Receiver should get all updates even when the channel is blocked", - providers: map[string]discovery.Discoverer{ - "mock1": newMockDiscoveryProvider( - update{ - targetGroups: []targetgroup.Group{ - { - Source: "tg1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - }, - update{ - interval: 4 * updateDelay, - targetGroups: []targetgroup.Group{ - { - Source: "tg2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - ), - }, - expected: []expect{ - { - delay: 2 * updateDelay, - tgs: map[string][]*targetgroup.Group{ - "mock1": { - { - Source: "tg1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - }, - }, - { - delay: 4 * updateDelay, - tgs: map[string][]*targetgroup.Group{ - "mock1": { - { - Source: "tg1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tg2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - }, - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.title, 
func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - mgr := NewManager(ctx, nil, reg, sdMetrics) - require.NotNil(t, mgr) - mgr.updatert = updateDelay - go mgr.Run() - - for name, p := range tc.providers { - mgr.StartCustomProvider(ctx, name, p) - } - - for i, expected := range tc.expected { - time.Sleep(expected.delay) - select { - case <-ctx.Done(): - t.Fatalf("step %d: no update received in the expected timeframe", i) - case tgs, ok := <-mgr.SyncCh(): - if !ok { - t.Fatalf("step %d: discovery manager channel is closed", i) - } - if len(tgs) != len(expected.tgs) { - t.Fatalf("step %d: target groups mismatch, got: %d, expected: %d\ngot: %#v\nexpected: %#v", - i, len(tgs), len(expected.tgs), tgs, expected.tgs) - } - for k := range expected.tgs { - if _, ok := tgs[k]; !ok { - t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs) - } - assertEqualGroups(t, tgs[k], expected.tgs[k]) - } - } - } - }) - } -} - -type update struct { - targetGroups []targetgroup.Group - interval time.Duration -} - -type mockdiscoveryProvider struct { - updates []update -} - -func newMockDiscoveryProvider(updates ...update) mockdiscoveryProvider { - tp := mockdiscoveryProvider{ - updates: updates, - } - return tp -} - -func (tp mockdiscoveryProvider) Run(ctx context.Context, upCh chan<- []*targetgroup.Group) { - for _, u := range tp.updates { - if u.interval > 0 { - select { - case <-ctx.Done(): - return - case <-time.After(u.interval): - } - } - tgs := make([]*targetgroup.Group, len(u.targetGroups)) - for i := range u.targetGroups { - tgs[i] = &u.targetGroups[i] - } - upCh <- tgs - } - <-ctx.Done() -} - -// byGroupSource implements sort.Interface so we can sort by the Source field. -type byGroupSource []*targetgroup.Group - -func (a byGroupSource) Len() int { return len(a) } -func (a byGroupSource) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byGroupSource) Less(i, j int) bool { return a[i].Source < a[j].Source } - -// onceProvider sends updates once (if any) and closes the update channel. -type onceProvider struct { - tgs []*targetgroup.Group -} - -func (o onceProvider) Run(_ context.Context, ch chan<- []*targetgroup.Group) { - if len(o.tgs) > 0 { - ch <- o.tgs - } - close(ch) -} diff --git a/discovery/legacymanager/registry.go b/discovery/legacymanager/registry.go deleted file mode 100644 index 955705394d9..00000000000 --- a/discovery/legacymanager/registry.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package legacymanager - -import ( - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" - "sync" - - "gopkg.in/yaml.v2" - - "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/discovery/targetgroup" -) - -const ( - configFieldPrefix = "AUTO_DISCOVERY_" - staticConfigsKey = "static_configs" - staticConfigsFieldName = configFieldPrefix + staticConfigsKey -) - -var ( - configNames = make(map[string]discovery.Config) - configFieldNames = make(map[reflect.Type]string) - configFields []reflect.StructField - - configTypesMu sync.Mutex - configTypes = make(map[reflect.Type]reflect.Type) - - emptyStructType = reflect.TypeOf(struct{}{}) - configsType = reflect.TypeOf(discovery.Configs{}) -) - -// RegisterConfig registers the given Config type for YAML marshaling and unmarshaling. -func RegisterConfig(config discovery.Config) { - registerConfig(config.Name()+"_sd_configs", reflect.TypeOf(config), config) -} - -func init() { - // N.B.: static_configs is the only Config type implemented by default. - // All other types are registered at init by their implementing packages. - elemTyp := reflect.TypeOf(&targetgroup.Group{}) - registerConfig(staticConfigsKey, elemTyp, discovery.StaticConfig{}) -} - -func registerConfig(yamlKey string, elemType reflect.Type, config discovery.Config) { - name := config.Name() - if _, ok := configNames[name]; ok { - panic(fmt.Sprintf("discovery: Config named %q is already registered", name)) - } - configNames[name] = config - - fieldName := configFieldPrefix + yamlKey // Field must be exported. - configFieldNames[elemType] = fieldName - - // Insert fields in sorted order. - i := sort.Search(len(configFields), func(k int) bool { - return fieldName < configFields[k].Name - }) - configFields = append(configFields, reflect.StructField{}) // Add empty field at end. - copy(configFields[i+1:], configFields[i:]) // Shift fields to the right. - configFields[i] = reflect.StructField{ // Write new field in place. - Name: fieldName, - Type: reflect.SliceOf(elemType), - Tag: reflect.StructTag(`yaml:"` + yamlKey + `,omitempty"`), - } -} - -func getConfigType(out reflect.Type) reflect.Type { - configTypesMu.Lock() - defer configTypesMu.Unlock() - if typ, ok := configTypes[out]; ok { - return typ - } - // Initial exported fields map one-to-one. - var fields []reflect.StructField - for i, n := 0, out.NumField(); i < n; i++ { - switch field := out.Field(i); { - case field.PkgPath == "" && field.Type != configsType: - fields = append(fields, field) - default: - fields = append(fields, reflect.StructField{ - Name: "_" + field.Name, // Field must be unexported. - PkgPath: out.PkgPath(), - Type: emptyStructType, - }) - } - } - // Append extra config fields on the end. - fields = append(fields, configFields...) - typ := reflect.StructOf(fields) - configTypes[out] = typ - return typ -} - -// UnmarshalYAMLWithInlineConfigs helps implement yaml.Unmarshal for structs -// that have a Configs field that should be inlined. 
-func UnmarshalYAMLWithInlineConfigs(out interface{}, unmarshal func(interface{}) error) error { - outVal := reflect.ValueOf(out) - if outVal.Kind() != reflect.Ptr { - return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out) - } - outVal = outVal.Elem() - if outVal.Kind() != reflect.Struct { - return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out) - } - outTyp := outVal.Type() - - cfgTyp := getConfigType(outTyp) - cfgPtr := reflect.New(cfgTyp) - cfgVal := cfgPtr.Elem() - - // Copy shared fields (defaults) to dynamic value. - var configs *discovery.Configs - for i, n := 0, outVal.NumField(); i < n; i++ { - if outTyp.Field(i).Type == configsType { - configs = outVal.Field(i).Addr().Interface().(*discovery.Configs) - continue - } - if cfgTyp.Field(i).PkgPath != "" { - continue // Field is unexported: ignore. - } - cfgVal.Field(i).Set(outVal.Field(i)) - } - if configs == nil { - return fmt.Errorf("discovery: Configs field not found in type: %T", out) - } - - // Unmarshal into dynamic value. - if err := unmarshal(cfgPtr.Interface()); err != nil { - return replaceYAMLTypeError(err, cfgTyp, outTyp) - } - - // Copy shared fields from dynamic value. - for i, n := 0, outVal.NumField(); i < n; i++ { - if cfgTyp.Field(i).PkgPath != "" { - continue // Field is unexported: ignore. - } - outVal.Field(i).Set(cfgVal.Field(i)) - } - - var err error - *configs, err = readConfigs(cfgVal, outVal.NumField()) - return err -} - -func readConfigs(structVal reflect.Value, startField int) (discovery.Configs, error) { - var ( - configs discovery.Configs - targets []*targetgroup.Group - ) - for i, n := startField, structVal.NumField(); i < n; i++ { - field := structVal.Field(i) - if field.Kind() != reflect.Slice { - panic("discovery: internal error: field is not a slice") - } - for k := 0; k < field.Len(); k++ { - val := field.Index(k) - if val.IsZero() || (val.Kind() == reflect.Ptr && val.Elem().IsZero()) { - key := configFieldNames[field.Type().Elem()] - key = strings.TrimPrefix(key, configFieldPrefix) - return nil, fmt.Errorf("empty or null section in %s", key) - } - switch c := val.Interface().(type) { - case *targetgroup.Group: - // Add index to the static config target groups for unique identification - // within scrape pool. - c.Source = strconv.Itoa(len(targets)) - // Coalesce multiple static configs into a single static config. - targets = append(targets, c) - case discovery.Config: - configs = append(configs, c) - default: - panic("discovery: internal error: slice element is not a Config") - } - } - } - if len(targets) > 0 { - configs = append(configs, discovery.StaticConfig(targets)) - } - return configs, nil -} - -// MarshalYAMLWithInlineConfigs helps implement yaml.Marshal for structs -// that have a Configs field that should be inlined. -func MarshalYAMLWithInlineConfigs(in interface{}) (interface{}, error) { - inVal := reflect.ValueOf(in) - for inVal.Kind() == reflect.Ptr { - inVal = inVal.Elem() - } - inTyp := inVal.Type() - - cfgTyp := getConfigType(inTyp) - cfgPtr := reflect.New(cfgTyp) - cfgVal := cfgPtr.Elem() - - // Copy shared fields to dynamic value. - var configs *discovery.Configs - for i, n := 0, inTyp.NumField(); i < n; i++ { - if inTyp.Field(i).Type == configsType { - configs = inVal.Field(i).Addr().Interface().(*discovery.Configs) - } - if cfgTyp.Field(i).PkgPath != "" { - continue // Field is unexported: ignore. 
- } - cfgVal.Field(i).Set(inVal.Field(i)) - } - if configs == nil { - return nil, fmt.Errorf("discovery: Configs field not found in type: %T", in) - } - - if err := writeConfigs(cfgVal, *configs); err != nil { - return nil, err - } - - return cfgPtr.Interface(), nil -} - -func writeConfigs(structVal reflect.Value, configs discovery.Configs) error { - targets := structVal.FieldByName(staticConfigsFieldName).Addr().Interface().(*[]*targetgroup.Group) - for _, c := range configs { - if sc, ok := c.(discovery.StaticConfig); ok { - *targets = append(*targets, sc...) - continue - } - fieldName, ok := configFieldNames[reflect.TypeOf(c)] - if !ok { - return fmt.Errorf("discovery: cannot marshal unregistered Config type: %T", c) - } - field := structVal.FieldByName(fieldName) - field.Set(reflect.Append(field, reflect.ValueOf(c))) - } - return nil -} - -func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error { - var e *yaml.TypeError - if errors.As(err, &e) { - oldStr := oldTyp.String() - newStr := newTyp.String() - for i, s := range e.Errors { - e.Errors[i] = strings.ReplaceAll(s, oldStr, newStr) - } - } - return err -} diff --git a/discovery/linode/linode.go b/discovery/linode/linode.go index 634a6b1d4bb..dfc12417c0e 100644 --- a/discovery/linode/linode.go +++ b/discovery/linode/linode.go @@ -17,13 +17,13 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "net/http" "strconv" "strings" "time" - "github.com/go-kit/log" "github.com/linode/linodego" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" @@ -138,7 +138,7 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*linodeMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/linode/linode_test.go b/discovery/linode/linode_test.go index 3c106506534..7bcaa05ba4d 100644 --- a/discovery/linode/linode_test.go +++ b/discovery/linode/linode_test.go @@ -19,10 +19,10 @@ import ( "net/url" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" @@ -238,7 +238,7 @@ func TestLinodeSDRefresh(t *testing.T) { defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) endpoint, err := url.Parse(sdmock.Endpoint()) require.NoError(t, err) diff --git a/discovery/manager.go b/discovery/manager.go index cefa90a8669..87e0ecc44b5 100644 --- a/discovery/manager.go +++ b/discovery/manager.go @@ -16,14 +16,14 @@ package discovery import ( "context" "fmt" + "log/slog" "reflect" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery/targetgroup" ) @@ -81,9 +81,9 @@ func CreateAndRegisterSDMetrics(reg prometheus.Registerer) (map[string]Discovere } // NewManager is the Discovery Manager constructor. 
-func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, sdMetrics map[string]DiscovererMetrics, options ...func(*Manager)) *Manager { +func NewManager(ctx context.Context, logger *slog.Logger, registerer prometheus.Registerer, sdMetrics map[string]DiscovererMetrics, options ...func(*Manager)) *Manager { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } mgr := &Manager{ logger: logger, @@ -104,7 +104,7 @@ func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Re if metrics, err := NewManagerMetrics(registerer, mgr.name); err == nil { mgr.metrics = metrics } else { - level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err) + logger.Error("Failed to create discovery manager metrics", "manager", mgr.name, "err", err) return nil } @@ -141,7 +141,7 @@ func HTTPClientOptions(opts ...config.HTTPClientOption) func(*Manager) { // Manager maintains a set of discovery providers and sends each update to a map channel. // Targets are grouped by the target set name. type Manager struct { - logger log.Logger + logger *slog.Logger name string httpOpts []config.HTTPClientOption mtx sync.RWMutex @@ -294,7 +294,7 @@ func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker D } func (m *Manager) startProvider(ctx context.Context, p *Provider) { - level.Debug(m.logger).Log("msg", "Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs)) + m.logger.Debug("Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs)) ctx, cancel := context.WithCancel(ctx) updates := make(chan []*targetgroup.Group) @@ -328,7 +328,7 @@ func (m *Manager) updater(ctx context.Context, p *Provider, updates chan []*targ case tgs, ok := <-updates: m.metrics.ReceivedUpdates.Inc() if !ok { - level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name) + m.logger.Debug("Discoverer channel closed", "provider", p.name) // Wait for provider cancellation to ensure targets are cleaned up when expected. 
<-ctx.Done() return @@ -364,7 +364,7 @@ func (m *Manager) sender() { case m.syncCh <- m.allGroups(): default: m.metrics.DelayedUpdates.Inc() - level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle") + m.logger.Debug("Discovery receiver's channel was full so will retry the next cycle") select { case m.triggerSend <- struct{}{}: default: @@ -458,12 +458,12 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int { } typ := cfg.Name() d, err := cfg.NewDiscoverer(DiscovererOptions{ - Logger: log.With(m.logger, "discovery", typ, "config", setName), + Logger: m.logger.With("discovery", typ, "config", setName), HTTPClientOptions: m.httpOpts, Metrics: m.sdMetrics[typ], }) if err != nil { - level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName) + m.logger.Error("Cannot create service discovery", "err", err, "type", typ, "config", setName) failed++ return } diff --git a/discovery/manager_test.go b/discovery/manager_test.go index 831cefe514d..b882c0b02ea 100644 --- a/discovery/manager_test.go +++ b/discovery/manager_test.go @@ -22,10 +22,10 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" client_testutil "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -675,7 +675,7 @@ func TestTargetUpdatesOrder(t *testing.T) { reg := prometheus.NewRegistry() _, sdMetrics := NewTestMetrics(t, reg) - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics) require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond @@ -791,7 +791,7 @@ func TestTargetSetTargetGroupsPresentOnConfigReload(t *testing.T) { reg := prometheus.NewRegistry() _, sdMetrics := NewTestMetrics(t, reg) - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics) require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -828,7 +828,7 @@ func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) { reg := prometheus.NewRegistry() _, sdMetrics := NewTestMetrics(t, reg) - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics) require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -868,7 +868,7 @@ func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testi reg := prometheus.NewRegistry() _, sdMetrics := NewTestMetrics(t, reg) - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics) require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -911,7 +911,7 @@ func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) { reg := prometheus.NewRegistry() _, sdMetrics := NewTestMetrics(t, reg) - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics) require.NotNil(t, discoveryManager) discoveryManager.updatert 
= 100 * time.Millisecond go discoveryManager.Run() @@ -979,7 +979,7 @@ func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) { reg := prometheus.NewRegistry() _, sdMetrics := NewTestMetrics(t, reg) - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics) require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -1023,7 +1023,7 @@ func TestDiscovererConfigs(t *testing.T) { reg := prometheus.NewRegistry() _, sdMetrics := NewTestMetrics(t, reg) - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics) require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -1060,7 +1060,7 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { reg := prometheus.NewRegistry() _, sdMetrics := NewTestMetrics(t, reg) - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics) require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -1141,7 +1141,7 @@ func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) { reg := prometheus.NewRegistry() _, sdMetrics := NewTestMetrics(t, reg) - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics) require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -1202,7 +1202,7 @@ func TestGaugeFailedConfigs(t *testing.T) { reg := prometheus.NewRegistry() _, sdMetrics := NewTestMetrics(t, reg) - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics) require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -1454,7 +1454,7 @@ func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) { reg := prometheus.NewRegistry() _, sdMetrics := NewTestMetrics(t, reg) - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics) require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -1551,7 +1551,7 @@ func TestUnregisterMetrics(t *testing.T) { refreshMetrics, sdMetrics := NewTestMetrics(t, reg) - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics) // discoveryManager will be nil if there was an error configuring metrics. require.NotNil(t, discoveryManager) // Unregister all metrics. diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go index 38b47accffb..f81a4410ebe 100644 --- a/discovery/marathon/marathon.go +++ b/discovery/marathon/marathon.go @@ -19,6 +19,7 @@ import ( "errors" "fmt" "io" + "log/slog" "math/rand" "net" "net/http" @@ -27,7 +28,6 @@ import ( "strings" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -140,7 +140,7 @@ type Discovery struct { } // NewDiscovery returns a new Marathon Discovery. 
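The hunks above replace go-kit/log throughout the discovery packages with the standard library's log/slog (with promslog.NewNopLogger standing in for log.NewNopLogger in tests). The sketch below is illustrative only and not part of the patch: a minimal, self-contained Go program showing the call-site translation, using plain slog rather than Prometheus' promslog wrapper.

```go
package main

import (
	"log/slog"
	"os"
)

func main() {
	// Plain slog stands in here for the promslog-configured logger used in Prometheus.
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))

	// Before: level.Error(logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ)
	// After:  logger.Error("Cannot create service discovery", "err", err, "type", typ)
	logger.Error("Cannot create service discovery", "err", "example error", "type", "example")

	// Before: log.With(logger, "discovery", typ, "config", setName)
	// After:  logger.With("discovery", typ, "config", setName)
	sub := logger.With("discovery", "example", "config", "example-set")

	// Before: level.Debug(logger).Log("msg", "Starting provider", "provider", name)
	// After:  logger.Debug("Starting provider", "provider", name)
	sub.Debug("Starting provider", "provider", "example-provider")
}
```

The key differences are that slog takes the message as its first argument instead of a "msg" key, and `With` is a method on the logger rather than a package-level function.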
-func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*marathonMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/moby/docker.go b/discovery/moby/docker.go index 68f6fe3ccc1..1a732c0502c 100644 --- a/discovery/moby/docker.go +++ b/discovery/moby/docker.go @@ -16,6 +16,7 @@ package moby import ( "context" "fmt" + "log/slog" "net" "net/http" "net/url" @@ -28,7 +29,6 @@ import ( "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/network" "github.com/docker/docker/client" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -128,7 +128,7 @@ type DockerDiscovery struct { } // NewDockerDiscovery returns a new DockerDiscovery which periodically refreshes its targets. -func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*DockerDiscovery, error) { +func NewDockerDiscovery(conf *DockerSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*DockerDiscovery, error) { m, ok := metrics.(*dockerMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/moby/docker_test.go b/discovery/moby/docker_test.go index 398393a15ae..00e6a3e4f36 100644 --- a/discovery/moby/docker_test.go +++ b/discovery/moby/docker_test.go @@ -19,9 +19,9 @@ import ( "sort" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" @@ -48,7 +48,7 @@ host: %s defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDockerDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDockerDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() @@ -226,7 +226,7 @@ host: %s require.NoError(t, metrics.Register()) defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDockerDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDockerDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() diff --git a/discovery/moby/dockerswarm.go b/discovery/moby/dockerswarm.go index b0147467d28..9e93e581f32 100644 --- a/discovery/moby/dockerswarm.go +++ b/discovery/moby/dockerswarm.go @@ -16,13 +16,13 @@ package moby import ( "context" "fmt" + "log/slog" "net/http" "net/url" "time" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/client" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -125,7 +125,7 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
-func NewDiscovery(conf *DockerSwarmSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *DockerSwarmSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*dockerswarmMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/moby/mock_test.go b/discovery/moby/mock_test.go index 3f35258c8f4..7ef5cb07c35 100644 --- a/discovery/moby/mock_test.go +++ b/discovery/moby/mock_test.go @@ -98,7 +98,7 @@ func (m *SDMock) SetupHandlers() { if len(query) == 2 { h := sha1.New() h.Write([]byte(query[1])) - // Avoing long filenames for Windows. + // Avoiding long filenames for Windows. f += "__" + base64.URLEncoding.EncodeToString(h.Sum(nil))[:10] } } diff --git a/discovery/moby/nodes_test.go b/discovery/moby/nodes_test.go index 4ad1088d1ab..973b83c4b62 100644 --- a/discovery/moby/nodes_test.go +++ b/discovery/moby/nodes_test.go @@ -18,9 +18,9 @@ import ( "fmt" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" @@ -48,7 +48,7 @@ host: %s defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() diff --git a/discovery/moby/services_test.go b/discovery/moby/services_test.go index 47ca69e33a1..7a966cfeee1 100644 --- a/discovery/moby/services_test.go +++ b/discovery/moby/services_test.go @@ -18,9 +18,9 @@ import ( "fmt" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" @@ -48,7 +48,7 @@ host: %s defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() @@ -349,7 +349,7 @@ filters: defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() diff --git a/discovery/moby/tasks_test.go b/discovery/moby/tasks_test.go index ef71bc02f53..59d8831c3bf 100644 --- a/discovery/moby/tasks_test.go +++ b/discovery/moby/tasks_test.go @@ -18,9 +18,9 @@ import ( "fmt" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" @@ -48,7 +48,7 @@ host: %s defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() diff --git a/discovery/moby/testdata/swarmprom/services.json b/discovery/moby/testdata/swarmprom/services.json index 72caa7a7f8d..8f6c0793dd2 100644 --- a/discovery/moby/testdata/swarmprom/services.json +++ b/discovery/moby/testdata/swarmprom/services.json @@ -224,7 +224,7 @@ "Args": [ "--config.file=/etc/prometheus/prometheus.yml", "--storage.tsdb.path=/prometheus", - 
"--storage.tsdb.retention=24h" + "--storage.tsdb.retention.time=24h" ], "Privileges": { "CredentialSpec": null, diff --git a/discovery/moby/testdata/swarmprom/tasks.json b/discovery/moby/testdata/swarmprom/tasks.json index 33d81f25ce1..af5ff9fe283 100644 --- a/discovery/moby/testdata/swarmprom/tasks.json +++ b/discovery/moby/testdata/swarmprom/tasks.json @@ -973,7 +973,7 @@ "Args": [ "--config.file=/etc/prometheus/prometheus.yml", "--storage.tsdb.path=/prometheus", - "--storage.tsdb.retention=24h" + "--storage.tsdb.retention.time=24h" ], "Privileges": { "CredentialSpec": null, diff --git a/discovery/nomad/nomad.go b/discovery/nomad/nomad.go index d9c48120ae8..1dbd8f1608c 100644 --- a/discovery/nomad/nomad.go +++ b/discovery/nomad/nomad.go @@ -17,12 +17,12 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" "strings" "time" - "github.com/go-kit/log" nomad "github.com/hashicorp/nomad/api" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" @@ -121,7 +121,7 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*nomadMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/nomad/nomad_test.go b/discovery/nomad/nomad_test.go index 357d4a8e9b6..32b087524cb 100644 --- a/discovery/nomad/nomad_test.go +++ b/discovery/nomad/nomad_test.go @@ -21,9 +21,9 @@ import ( "net/url" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" @@ -160,7 +160,7 @@ func TestNomadSDRefresh(t *testing.T) { defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) tgs, err := d.refresh(context.Background()) diff --git a/discovery/openstack/hypervisor.go b/discovery/openstack/hypervisor.go index 8964da9294f..ec127b18618 100644 --- a/discovery/openstack/hypervisor.go +++ b/discovery/openstack/hypervisor.go @@ -16,10 +16,10 @@ package openstack import ( "context" "fmt" + "log/slog" "net" "strconv" - "github.com/go-kit/log" "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack" "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors" @@ -43,14 +43,14 @@ type HypervisorDiscovery struct { provider *gophercloud.ProviderClient authOpts *gophercloud.AuthOptions region string - logger log.Logger + logger *slog.Logger port int availability gophercloud.Availability } // newHypervisorDiscovery returns a new hypervisor discovery. 
func newHypervisorDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions, - port int, region string, availability gophercloud.Availability, l log.Logger, + port int, region string, availability gophercloud.Availability, l *slog.Logger, ) *HypervisorDiscovery { return &HypervisorDiscovery{ provider: provider, authOpts: opts, diff --git a/discovery/openstack/hypervisor_test.go b/discovery/openstack/hypervisor_test.go index 45684b4a2ec..e4a97f32cff 100644 --- a/discovery/openstack/hypervisor_test.go +++ b/discovery/openstack/hypervisor_test.go @@ -93,6 +93,5 @@ func TestOpenstackSDHypervisorRefreshWithDoneContext(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() _, err := hypervisor.refresh(ctx) - require.Error(t, err) - require.Contains(t, err.Error(), context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled) + require.ErrorContains(t, err, context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled) } diff --git a/discovery/openstack/instance.go b/discovery/openstack/instance.go index 78c669e6f76..2a9e29f2efb 100644 --- a/discovery/openstack/instance.go +++ b/discovery/openstack/instance.go @@ -16,17 +16,17 @@ package openstack import ( "context" "fmt" + "log/slog" "net" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack" "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips" "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" "github.com/gophercloud/gophercloud/pagination" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/strutil" @@ -52,7 +52,7 @@ type InstanceDiscovery struct { provider *gophercloud.ProviderClient authOpts *gophercloud.AuthOptions region string - logger log.Logger + logger *slog.Logger port int allTenants bool availability gophercloud.Availability @@ -60,10 +60,10 @@ type InstanceDiscovery struct { // NewInstanceDiscovery returns a new instance discovery. 
func newInstanceDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions, - port int, region string, allTenants bool, availability gophercloud.Availability, l log.Logger, + port int, region string, allTenants bool, availability gophercloud.Availability, l *slog.Logger, ) *InstanceDiscovery { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } return &InstanceDiscovery{ provider: provider, authOpts: opts, @@ -134,7 +134,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, for _, s := range instanceList { if len(s.Addresses) == 0 { - level.Info(i.logger).Log("msg", "Got no IP address", "instance", s.ID) + i.logger.Info("Got no IP address", "instance", s.ID) continue } @@ -151,7 +151,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, if !nameOk { flavorID, idOk := s.Flavor["id"].(string) if !idOk { - level.Warn(i.logger).Log("msg", "Invalid type for both flavor original_name and flavor id, expected string") + i.logger.Warn("Invalid type for both flavor original_name and flavor id, expected string") continue } labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorID) @@ -171,22 +171,22 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, for pool, address := range s.Addresses { md, ok := address.([]interface{}) if !ok { - level.Warn(i.logger).Log("msg", "Invalid type for address, expected array") + i.logger.Warn("Invalid type for address, expected array") continue } if len(md) == 0 { - level.Debug(i.logger).Log("msg", "Got no IP address", "instance", s.ID) + i.logger.Debug("Got no IP address", "instance", s.ID) continue } for _, address := range md { md1, ok := address.(map[string]interface{}) if !ok { - level.Warn(i.logger).Log("msg", "Invalid type for address, expected dict") + i.logger.Warn("Invalid type for address, expected dict") continue } addr, ok := md1["addr"].(string) if !ok { - level.Warn(i.logger).Log("msg", "Invalid type for address, expected string") + i.logger.Warn("Invalid type for address, expected string") continue } if _, ok := floatingIPPresent[addr]; ok { diff --git a/discovery/openstack/instance_test.go b/discovery/openstack/instance_test.go index 2b5ac1b89eb..2617baa4e3b 100644 --- a/discovery/openstack/instance_test.go +++ b/discovery/openstack/instance_test.go @@ -134,6 +134,5 @@ func TestOpenstackSDInstanceRefreshWithDoneContext(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() _, err := hypervisor.refresh(ctx) - require.Error(t, err) - require.Contains(t, err.Error(), context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled) + require.ErrorContains(t, err, context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled) } diff --git a/discovery/openstack/openstack.go b/discovery/openstack/openstack.go index c98f78788d4..fa7e0cce902 100644 --- a/discovery/openstack/openstack.go +++ b/discovery/openstack/openstack.go @@ -17,10 +17,10 @@ import ( "context" "errors" "fmt" + "log/slog" "net/http" "time" - "github.com/go-kit/log" "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack" "github.com/mwitkow/go-conntrack" @@ -142,7 +142,7 @@ type refresher interface { } // NewDiscovery returns a new OpenStack Discoverer which periodically refreshes its targets. 
-func NewDiscovery(conf *SDConfig, l log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, l *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { m, ok := metrics.(*openstackMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") @@ -163,7 +163,7 @@ func NewDiscovery(conf *SDConfig, l log.Logger, metrics discovery.DiscovererMetr ), nil } -func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) { +func newRefresher(conf *SDConfig, l *slog.Logger) (refresher, error) { var opts gophercloud.AuthOptions if conf.IdentityEndpoint == "" { var err error diff --git a/discovery/ovhcloud/dedicated_server.go b/discovery/ovhcloud/dedicated_server.go index a70857a08b2..15bb9809c93 100644 --- a/discovery/ovhcloud/dedicated_server.go +++ b/discovery/ovhcloud/dedicated_server.go @@ -16,13 +16,12 @@ package ovhcloud import ( "context" "fmt" + "log/slog" "net/netip" "net/url" "path" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/ovh/go-ovh/ovh" "github.com/prometheus/common/model" @@ -55,10 +54,10 @@ type dedicatedServer struct { type dedicatedServerDiscovery struct { *refresh.Discovery config *SDConfig - logger log.Logger + logger *slog.Logger } -func newDedicatedServerDiscovery(conf *SDConfig, logger log.Logger) *dedicatedServerDiscovery { +func newDedicatedServerDiscovery(conf *SDConfig, logger *slog.Logger) *dedicatedServerDiscovery { return &dedicatedServerDiscovery{config: conf, logger: logger} } @@ -115,10 +114,7 @@ func (d *dedicatedServerDiscovery) refresh(context.Context) ([]*targetgroup.Grou for _, dedicatedServerName := range dedicatedServerList { dedicatedServer, err := getDedicatedServerDetails(client, dedicatedServerName) if err != nil { - err := level.Warn(d.logger).Log("msg", fmt.Sprintf("%s: Could not get details of %s", d.getSource(), dedicatedServerName), "err", err.Error()) - if err != nil { - return nil, err - } + d.logger.Warn(fmt.Sprintf("%s: Could not get details of %s", d.getSource(), dedicatedServerName), "err", err.Error()) continue } dedicatedServerDetailedList = append(dedicatedServerDetailedList, *dedicatedServer) diff --git a/discovery/ovhcloud/dedicated_server_test.go b/discovery/ovhcloud/dedicated_server_test.go index 52311bcc876..f9dbd6af9ce 100644 --- a/discovery/ovhcloud/dedicated_server_test.go +++ b/discovery/ovhcloud/dedicated_server_test.go @@ -21,8 +21,8 @@ import ( "os" "testing" - "github.com/go-kit/log" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" ) @@ -41,7 +41,7 @@ application_secret: %s consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecretTest, ovhcloudConsumerKeyTest) require.NoError(t, yaml.UnmarshalStrict([]byte(cfgString), &cfg)) - d, err := newRefresher(&cfg, log.NewNopLogger()) + d, err := newRefresher(&cfg, promslog.NewNopLogger()) require.NoError(t, err) ctx := context.Background() targetGroups, err := d.refresh(ctx) diff --git a/discovery/ovhcloud/ovhcloud.go b/discovery/ovhcloud/ovhcloud.go index 988b4482f22..08ed70296bf 100644 --- a/discovery/ovhcloud/ovhcloud.go +++ b/discovery/ovhcloud/ovhcloud.go @@ -17,10 +17,10 @@ import ( "context" "errors" "fmt" + "log/slog" "net/netip" "time" - "github.com/go-kit/log" "github.com/ovh/go-ovh/ovh" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" @@ -137,7 +137,7 @@ func parseIPList(ipList []string) 
([]netip.Addr, error) { return ipAddresses, nil } -func newRefresher(conf *SDConfig, logger log.Logger) (refresher, error) { +func newRefresher(conf *SDConfig, logger *slog.Logger) (refresher, error) { switch conf.Service { case "vps": return newVpsDiscovery(conf, logger), nil @@ -148,7 +148,7 @@ func newRefresher(conf *SDConfig, logger log.Logger) (refresher, error) { } // NewDiscovery returns a new OVHcloud Discoverer which periodically refreshes its targets. -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { m, ok := metrics.(*ovhcloudMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/ovhcloud/ovhcloud_test.go b/discovery/ovhcloud/ovhcloud_test.go index 9c95bf90e67..84a35af3ad0 100644 --- a/discovery/ovhcloud/ovhcloud_test.go +++ b/discovery/ovhcloud/ovhcloud_test.go @@ -20,11 +20,11 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/util/testutil" ) var ( @@ -121,7 +121,7 @@ func TestParseIPs(t *testing.T) { func TestDiscoverer(t *testing.T) { conf, _ := getMockConf("vps") - logger := testutil.NewLogger(t) + logger := promslog.NewNopLogger() reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) diff --git a/discovery/ovhcloud/vps.go b/discovery/ovhcloud/vps.go index 58ceeabd87a..7050f826a54 100644 --- a/discovery/ovhcloud/vps.go +++ b/discovery/ovhcloud/vps.go @@ -16,13 +16,12 @@ package ovhcloud import ( "context" "fmt" + "log/slog" "net/netip" "net/url" "path" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/ovh/go-ovh/ovh" "github.com/prometheus/common/model" @@ -68,10 +67,10 @@ type virtualPrivateServer struct { type vpsDiscovery struct { *refresh.Discovery config *SDConfig - logger log.Logger + logger *slog.Logger } -func newVpsDiscovery(conf *SDConfig, logger log.Logger) *vpsDiscovery { +func newVpsDiscovery(conf *SDConfig, logger *slog.Logger) *vpsDiscovery { return &vpsDiscovery{config: conf, logger: logger} } @@ -133,10 +132,7 @@ func (d *vpsDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) { for _, vpsName := range vpsList { vpsDetailed, err := getVpsDetails(client, vpsName) if err != nil { - err := level.Warn(d.logger).Log("msg", fmt.Sprintf("%s: Could not get details of %s", d.getSource(), vpsName), "err", err.Error()) - if err != nil { - return nil, err - } + d.logger.Warn(fmt.Sprintf("%s: Could not get details of %s", d.getSource(), vpsName), "err", err.Error()) continue } vpsDetailedList = append(vpsDetailedList, *vpsDetailed) diff --git a/discovery/ovhcloud/vps_test.go b/discovery/ovhcloud/vps_test.go index 2d2d6dcd219..00d59da7f08 100644 --- a/discovery/ovhcloud/vps_test.go +++ b/discovery/ovhcloud/vps_test.go @@ -23,8 +23,8 @@ import ( yaml "gopkg.in/yaml.v2" - "github.com/go-kit/log" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" ) @@ -43,7 +43,7 @@ consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecr require.NoError(t, yaml.UnmarshalStrict([]byte(cfgString), &cfg)) - d, err := newRefresher(&cfg, log.NewNopLogger()) + d, err := 
newRefresher(&cfg, promslog.NewNopLogger()) require.NoError(t, err) ctx := context.Background() targetGroups, err := d.refresh(ctx) diff --git a/discovery/puppetdb/puppetdb.go b/discovery/puppetdb/puppetdb.go index 8f89acbf936..6122a76da79 100644 --- a/discovery/puppetdb/puppetdb.go +++ b/discovery/puppetdb/puppetdb.go @@ -19,6 +19,7 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "net" "net/http" "net/url" @@ -27,11 +28,11 @@ import ( "strings" "time" - "github.com/go-kit/log" "github.com/grafana/regexp" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" "github.com/prometheus/prometheus/discovery" @@ -138,14 +139,14 @@ type Discovery struct { } // NewDiscovery returns a new PuppetDB discovery for the given config. -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*puppetdbMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http") diff --git a/discovery/puppetdb/puppetdb_test.go b/discovery/puppetdb/puppetdb_test.go index bf9c7b215e5..4585b782233 100644 --- a/discovery/puppetdb/puppetdb_test.go +++ b/discovery/puppetdb/puppetdb_test.go @@ -22,10 +22,10 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" @@ -70,7 +70,7 @@ func TestPuppetSlashInURL(t *testing.T) { metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) require.Equal(t, apiURL, d.url) @@ -94,7 +94,7 @@ func TestPuppetDBRefresh(t *testing.T) { metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() @@ -142,7 +142,7 @@ func TestPuppetDBRefreshWithParameters(t *testing.T) { metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() @@ -201,7 +201,7 @@ func TestPuppetDBInvalidCode(t *testing.T) { metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() @@ -229,7 +229,7 @@ func TestPuppetDBInvalidFormat(t *testing.T) { metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) 
require.NoError(t, err) ctx := context.Background() diff --git a/discovery/refresh/refresh.go b/discovery/refresh/refresh.go index f037a90cff0..31646c0e4c1 100644 --- a/discovery/refresh/refresh.go +++ b/discovery/refresh/refresh.go @@ -16,17 +16,17 @@ package refresh import ( "context" "errors" + "log/slog" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) type Options struct { - Logger log.Logger + Logger *slog.Logger Mech string Interval time.Duration RefreshF func(ctx context.Context) ([]*targetgroup.Group, error) @@ -35,7 +35,7 @@ type Options struct { // Discovery implements the Discoverer interface. type Discovery struct { - logger log.Logger + logger *slog.Logger interval time.Duration refreshf func(ctx context.Context) ([]*targetgroup.Group, error) metrics *discovery.RefreshMetrics @@ -45,9 +45,9 @@ type Discovery struct { func NewDiscovery(opts Options) *Discovery { m := opts.MetricsInstantiator.Instantiate(opts.Mech) - var logger log.Logger + var logger *slog.Logger if opts.Logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } else { logger = opts.Logger } @@ -68,7 +68,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { tgs, err := d.refresh(ctx) if err != nil { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(d.logger).Log("msg", "Unable to refresh target groups", "err", err.Error()) + d.logger.Error("Unable to refresh target groups", "err", err.Error()) } } else { select { @@ -87,7 +87,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { tgs, err := d.refresh(ctx) if err != nil { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(d.logger).Log("msg", "Unable to refresh target groups", "err", err.Error()) + d.logger.Error("Unable to refresh target groups", "err", err.Error()) } continue } diff --git a/discovery/scaleway/scaleway.go b/discovery/scaleway/scaleway.go index f8e1a83f5ee..670e439c4f1 100644 --- a/discovery/scaleway/scaleway.go +++ b/discovery/scaleway/scaleway.go @@ -17,12 +17,12 @@ import ( "context" "errors" "fmt" + "log/slog" "net/http" "os" "strings" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -185,7 +185,7 @@ func init() { // the Discoverer interface. type Discovery struct{} -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { m, ok := metrics.(*scalewayMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/triton/triton.go b/discovery/triton/triton.go index 675149f2a30..7b3b18f471e 100644 --- a/discovery/triton/triton.go +++ b/discovery/triton/triton.go @@ -19,12 +19,12 @@ import ( "errors" "fmt" "io" + "log/slog" "net/http" "net/url" "strings" "time" - "github.com/go-kit/log" "github.com/mwitkow/go-conntrack" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" @@ -146,7 +146,7 @@ type Discovery struct { } // New returns a new Discovery which periodically refreshes its targets. 
-func New(logger log.Logger, conf *SDConfig, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func New(logger *slog.Logger, conf *SDConfig, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*tritonMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/triton/triton_test.go b/discovery/triton/triton_test.go index e37693e6bfc..b2d06afaf6b 100644 --- a/discovery/triton/triton_test.go +++ b/discovery/triton/triton_test.go @@ -21,7 +21,6 @@ import ( "net/http/httptest" "net/url" "strconv" - "strings" "testing" "github.com/prometheus/client_golang/prometheus" @@ -182,8 +181,7 @@ func TestTritonSDRefreshNoServer(t *testing.T) { td, m, _ := newTritonDiscovery(conf) _, err := td.refresh(context.Background()) - require.Error(t, err) - require.True(t, strings.Contains(err.Error(), "an error occurred when requesting targets from the discovery endpoint")) + require.ErrorContains(t, err, "an error occurred when requesting targets from the discovery endpoint") m.Unregister() } @@ -193,8 +191,7 @@ func TestTritonSDRefreshCancelled(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() _, err := td.refresh(ctx) - require.Error(t, err) - require.True(t, strings.Contains(err.Error(), context.Canceled.Error())) + require.ErrorContains(t, err, context.Canceled.Error()) m.Unregister() } diff --git a/discovery/uyuni/uyuni.go b/discovery/uyuni/uyuni.go index c8af2f15878..de806895d7b 100644 --- a/discovery/uyuni/uyuni.go +++ b/discovery/uyuni/uyuni.go @@ -17,6 +17,7 @@ import ( "context" "errors" "fmt" + "log/slog" "net/http" "net/url" "path" @@ -24,7 +25,6 @@ import ( "strings" "time" - "github.com/go-kit/log" "github.com/kolo/xmlrpc" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" @@ -41,10 +41,10 @@ const ( uyuniMetaLabelPrefix = model.MetaLabelPrefix + "uyuni_" uyuniLabelMinionHostname = uyuniMetaLabelPrefix + "minion_hostname" uyuniLabelPrimaryFQDN = uyuniMetaLabelPrefix + "primary_fqdn" - uyuniLablelSystemID = uyuniMetaLabelPrefix + "system_id" - uyuniLablelGroups = uyuniMetaLabelPrefix + "groups" - uyuniLablelEndpointName = uyuniMetaLabelPrefix + "endpoint_name" - uyuniLablelExporter = uyuniMetaLabelPrefix + "exporter" + uyuniLabelSystemID = uyuniMetaLabelPrefix + "system_id" + uyuniLabelGroups = uyuniMetaLabelPrefix + "groups" + uyuniLabelEndpointName = uyuniMetaLabelPrefix + "endpoint_name" + uyuniLabelExporter = uyuniMetaLabelPrefix + "exporter" uyuniLabelProxyModule = uyuniMetaLabelPrefix + "proxy_module" uyuniLabelMetricsPath = uyuniMetaLabelPrefix + "metrics_path" uyuniLabelScheme = uyuniMetaLabelPrefix + "scheme" @@ -109,7 +109,7 @@ type Discovery struct { entitlement string separator string interval time.Duration - logger log.Logger + logger *slog.Logger } // NewDiscovererMetrics implements discovery.Config. @@ -212,7 +212,7 @@ func getEndpointInfoForSystems( } // NewDiscovery returns a uyuni discovery for the given configuration. 
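Several of the test hunks (openstack, triton, xds) fold the former `require.Error` plus `require.Contains(t, err.Error(), ...)` pair into a single `require.ErrorContains` assertion. The small test below is an editor's illustration, not part of the patch; `doRefresh` is a hypothetical stand-in for the real discovery refresh calls, and it shows that the two forms assert the same thing.

```go
package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

// doRefresh is a hypothetical stand-in for the refresh calls exercised in the real tests.
func doRefresh() error {
	return errors.New("an error occurred when requesting targets from the discovery endpoint")
}

func TestRefreshError(t *testing.T) {
	err := doRefresh()

	// Old style, as removed by this patch: two assertions.
	require.Error(t, err)
	require.Contains(t, err.Error(), "requesting targets")

	// New style, as added by this patch: a single assertion covering both
	// the non-nil check and the substring match.
	require.ErrorContains(t, err, "requesting targets")
}
```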
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*uyuniMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") @@ -270,10 +270,10 @@ func (d *Discovery) getEndpointLabels( model.AddressLabel: model.LabelValue(addr), uyuniLabelMinionHostname: model.LabelValue(networkInfo.Hostname), uyuniLabelPrimaryFQDN: model.LabelValue(networkInfo.PrimaryFQDN), - uyuniLablelSystemID: model.LabelValue(strconv.Itoa(endpoint.SystemID)), - uyuniLablelGroups: model.LabelValue(strings.Join(managedGroupNames, d.separator)), - uyuniLablelEndpointName: model.LabelValue(endpoint.EndpointName), - uyuniLablelExporter: model.LabelValue(endpoint.ExporterName), + uyuniLabelSystemID: model.LabelValue(strconv.Itoa(endpoint.SystemID)), + uyuniLabelGroups: model.LabelValue(strings.Join(managedGroupNames, d.separator)), + uyuniLabelEndpointName: model.LabelValue(endpoint.EndpointName), + uyuniLabelExporter: model.LabelValue(endpoint.ExporterName), uyuniLabelProxyModule: model.LabelValue(endpoint.Module), uyuniLabelMetricsPath: model.LabelValue(endpoint.Path), uyuniLabelScheme: model.LabelValue(scheme), diff --git a/discovery/vultr/vultr.go b/discovery/vultr/vultr.go index aaa9c64e47a..f82b22168af 100644 --- a/discovery/vultr/vultr.go +++ b/discovery/vultr/vultr.go @@ -16,13 +16,13 @@ package vultr import ( "context" "fmt" + "log/slog" "net" "net/http" "strconv" "strings" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -114,7 +114,7 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*vultrMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") diff --git a/discovery/vultr/vultr_test.go b/discovery/vultr/vultr_test.go index 2f12a35529f..00ef21e38c4 100644 --- a/discovery/vultr/vultr_test.go +++ b/discovery/vultr/vultr_test.go @@ -19,9 +19,9 @@ import ( "net/url" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" @@ -57,7 +57,7 @@ func TestVultrSDRefresh(t *testing.T) { defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) endpoint, err := url.Parse(sdMock.Mock.Endpoint()) require.NoError(t, err) diff --git a/discovery/xds/client_test.go b/discovery/xds/client_test.go index b699995fb7d..2cf5b2f9cbf 100644 --- a/discovery/xds/client_test.go +++ b/discovery/xds/client_test.go @@ -52,16 +52,14 @@ func TestMakeXDSResourceHttpEndpointEmptyServerURLScheme(t *testing.T) { endpointURL, err := makeXDSResourceHTTPEndpointURL(ProtocolV3, urlMustParse("127.0.0.1"), "monitoring") require.Empty(t, endpointURL) - require.Error(t, err) - require.Equal(t, "invalid xDS server URL", err.Error()) + require.EqualError(t, err, "invalid xDS server URL") } func TestMakeXDSResourceHttpEndpointEmptyServerURLHost(t *testing.T) { endpointURL, err := makeXDSResourceHTTPEndpointURL(ProtocolV3, urlMustParse("grpc://127.0.0.1"), "monitoring") require.Empty(t, endpointURL) - require.Error(t, err) - require.Contains(t, err.Error(), "must be either 'http' or 'https'") + require.ErrorContains(t, err, "must be either 'http' or 'https'") } func TestMakeXDSResourceHttpEndpoint(t *testing.T) { diff --git a/discovery/xds/kuma.go b/discovery/xds/kuma.go index d1d540aaf4d..55b3d628e53 100644 --- a/discovery/xds/kuma.go +++ b/discovery/xds/kuma.go @@ -15,14 +15,14 @@ package xds import ( "fmt" + "log/slog" "net/url" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "google.golang.org/protobuf/types/known/anypb" "github.com/prometheus/prometheus/discovery" @@ -99,7 +99,7 @@ func (c *KumaSDConfig) SetDirectory(dir string) { func (c *KumaSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { logger := opts.Logger if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } return NewKumaHTTPDiscovery(c, logger, opts.Metrics) @@ -158,7 +158,7 @@ func kumaMadsV1ResourceParser(resources []*anypb.Any, typeURL string) ([]model.L return targets, nil } -func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (discovery.Discoverer, error) { +func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (discovery.Discoverer, error) { m, ok := metrics.(*xdsMetrics) if !ok { return nil, fmt.Errorf("invalid discovery metrics type") @@ -170,7 +170,7 @@ func NewKumaHTTPDiscovery(conf *KumaSDConfig, 
logger log.Logger, metrics discove var err error clientID, err = osutil.GetFQDN() if err != nil { - level.Debug(logger).Log("msg", "error getting FQDN", "err", err) + logger.Debug("error getting FQDN", "err", err) clientID = "prometheus" } } diff --git a/discovery/xds/kuma_mads.pb.go b/discovery/xds/kuma_mads.pb.go index b1079bf23f7..210a5343a4b 100644 --- a/discovery/xds/kuma_mads.pb.go +++ b/discovery/xds/kuma_mads.pb.go @@ -23,13 +23,14 @@ package xds import ( context "context" + reflect "reflect" + sync "sync" + v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" _ "google.golang.org/genproto/googleapis/api/annotations" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" ) const ( diff --git a/discovery/xds/kuma_test.go b/discovery/xds/kuma_test.go index cfb9cbac501..23d754c4b71 100644 --- a/discovery/xds/kuma_test.go +++ b/discovery/xds/kuma_test.go @@ -201,9 +201,8 @@ func TestKumaMadsV1ResourceParserInvalidResources(t *testing.T) { }} groups, err := kumaMadsV1ResourceParser(resources, KumaMadsV1ResourceTypeURL) require.Nil(t, groups) - require.Error(t, err) - require.Contains(t, err.Error(), "cannot parse") + require.ErrorContains(t, err, "cannot parse") } func TestNewKumaHTTPDiscovery(t *testing.T) { diff --git a/discovery/xds/xds.go b/discovery/xds/xds.go index 8191d6be1ae..db55a2b6f74 100644 --- a/discovery/xds/xds.go +++ b/discovery/xds/xds.go @@ -15,11 +15,10 @@ package xds import ( "context" + "log/slog" "time" v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "google.golang.org/protobuf/encoding/protojson" @@ -104,7 +103,7 @@ type fetchDiscovery struct { refreshInterval time.Duration parseResources resourceParser - logger log.Logger + logger *slog.Logger metrics *xdsMetrics } @@ -140,7 +139,7 @@ func (d *fetchDiscovery) poll(ctx context.Context, ch chan<- []*targetgroup.Grou } if err != nil { - level.Error(d.logger).Log("msg", "error parsing resources", "err", err) + d.logger.Error("error parsing resources", "err", err) d.metrics.fetchFailuresCount.Inc() return } @@ -153,12 +152,12 @@ func (d *fetchDiscovery) poll(ctx context.Context, ch chan<- []*targetgroup.Grou parsedTargets, err := d.parseResources(response.Resources, response.TypeUrl) if err != nil { - level.Error(d.logger).Log("msg", "error parsing resources", "err", err) + d.logger.Error("error parsing resources", "err", err) d.metrics.fetchFailuresCount.Inc() return } - level.Debug(d.logger).Log("msg", "Updated to version", "version", response.VersionInfo, "targets", len(parsedTargets)) + d.logger.Debug("Updated to version", "version", response.VersionInfo, "targets", len(parsedTargets)) select { case <-ctx.Done(): diff --git a/discovery/xds/xds_test.go b/discovery/xds/xds_test.go index 7cce021c5f0..db10adc1a25 100644 --- a/discovery/xds/xds_test.go +++ b/discovery/xds/xds_test.go @@ -22,9 +22,9 @@ import ( "time" v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/goleak" "google.golang.org/protobuf/types/known/anypb" @@ -90,7 +90,7 @@ func constantResourceParser(targets 
[]model.LabelSet, err error) resourceParser } } -var nopLogger = log.NewNopLogger() +var nopLogger = promslog.NewNopLogger() type testResourceClient struct { resourceTypeURL string diff --git a/discovery/zookeeper/zookeeper.go b/discovery/zookeeper/zookeeper.go index 92904dd71c8..a1cfe3d055b 100644 --- a/discovery/zookeeper/zookeeper.go +++ b/discovery/zookeeper/zookeeper.go @@ -18,15 +18,16 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "net" "strconv" "strings" "time" - "github.com/go-kit/log" "github.com/go-zookeeper/zk" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -146,16 +147,16 @@ type Discovery struct { treeCaches []*treecache.ZookeeperTreeCache parse func(data []byte, path string) (model.LabelSet, error) - logger log.Logger + logger *slog.Logger } // NewNerveDiscovery returns a new Discovery for the given Nerve config. -func NewNerveDiscovery(conf *NerveSDConfig, logger log.Logger) (*Discovery, error) { +func NewNerveDiscovery(conf *NerveSDConfig, logger *slog.Logger) (*Discovery, error) { return NewDiscovery(conf.Servers, time.Duration(conf.Timeout), conf.Paths, logger, parseNerveMember) } // NewServersetDiscovery returns a new Discovery for the given serverset config. -func NewServersetDiscovery(conf *ServersetSDConfig, logger log.Logger) (*Discovery, error) { +func NewServersetDiscovery(conf *ServersetSDConfig, logger *slog.Logger) (*Discovery, error) { return NewDiscovery(conf.Servers, time.Duration(conf.Timeout), conf.Paths, logger, parseServersetMember) } @@ -165,11 +166,11 @@ func NewDiscovery( srvs []string, timeout time.Duration, paths []string, - logger log.Logger, + logger *slog.Logger, pf func(data []byte, path string) (model.LabelSet, error), ) (*Discovery, error) { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } conn, _, err := zk.Connect( diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index 7d9e5a3c809..dd207dc3821 100644 --- a/docs/command-line/prometheus.md +++ b/docs/command-line/prometheus.md @@ -15,11 +15,15 @@ The Prometheus monitoring server | -h, --help | Show context-sensitive help (also try --help-long and --help-man). | | | --version | Show application version. | | | --config.file | Prometheus configuration file path. | `prometheus.yml` | +| --config.auto-reload-interval | Specifies the interval for checking and automatically reloading the Prometheus configuration file upon detecting changes. | `30s` | | --web.listen-address ... | Address to listen on for UI, API, and telemetry. Can be repeated. | `0.0.0.0:9090` | +| --auto-gomaxprocs | Automatically set GOMAXPROCS to match Linux container CPU quota | `true` | +| --auto-gomemlimit | Automatically set GOMEMLIMIT to match Linux container or system memory limit | `true` | | --auto-gomemlimit.ratio | The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory | `0.9` | | --web.config.file | [EXPERIMENTAL] Path to configuration file that can enable TLS or authentication. | | | --web.read-timeout | Maximum duration before timing out read of the request, and closing idle connections. | `5m` | | --web.max-connections | Maximum number of simultaneous connections across all listeners. | `512` | +| --web.max-notifications-subscribers | Limits the maximum number of subscribers that can concurrently receive live notifications. 
If the limit is reached, new subscription requests will be denied until existing connections close. | `16` | | --web.external-url | The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Prometheus. If omitted, relevant URL components will be derived automatically. | | | --web.route-prefix | Prefix for the internal routes of web endpoints. Defaults to path of --web.external-url. | | | --web.user-assets | Path to static asset directory, available at /user. | | @@ -27,13 +31,13 @@ The Prometheus monitoring server | --web.enable-admin-api | Enable API endpoints for admin control actions. | `false` | | --web.enable-remote-write-receiver | Enable API endpoint accepting remote write requests. | `false` | | --web.remote-write-receiver.accepted-protobuf-messages | List of the remote write protobuf messages to accept when receiving the remote writes. Supported values: prometheus.WriteRequest, io.prometheus.write.v2.Request | `prometheus.WriteRequest` | +| --web.enable-otlp-receiver | Enable API endpoint accepting OTLP write requests. | `false` | | --web.console.templates | Path to the console template directory, available at /consoles. | `consoles` | | --web.console.libraries | Path to the console library directory. | `console_libraries` | | --web.page-title | Document title of Prometheus instance. | `Prometheus Time Series Collection and Processing Server` | | --web.cors.origin | Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1\|domain2)\.com' | `.*` | | --storage.tsdb.path | Base path for metrics storage. Use with server mode only. | `data/` | -| --storage.tsdb.retention | [DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use "storage.tsdb.retention.time" instead. Use with server mode only. | | -| --storage.tsdb.retention.time | How long to retain samples in storage. When this flag is set it overrides "storage.tsdb.retention". If neither this flag nor "storage.tsdb.retention" nor "storage.tsdb.retention.size" is set, the retention time defaults to 15d. Units Supported: y, w, d, h, m, s, ms. Use with server mode only. | | +| --storage.tsdb.retention.time | How long to retain samples in storage. If neither this flag nor "storage.tsdb.retention.size" is set, the retention time defaults to 15d. Units Supported: y, w, d, h, m, s, ms. Use with server mode only. | | | --storage.tsdb.retention.size | Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Use with server mode only. | | | --storage.tsdb.no-lockfile | Do not create lockfile in data directory. Use with server mode only. | `false` | | --storage.tsdb.head-chunks-write-queue-size | Size of the queue through which head chunks are written to the disk to be m-mapped, 0 disables the queue completely. Experimental. Use with server mode only. | `0` | @@ -56,8 +60,8 @@ The Prometheus monitoring server | --query.timeout | Maximum time a query may take before being aborted. Use with server mode only. | `2m` | | --query.max-concurrency | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | --query.max-samples | Maximum number of samples a single query can load into memory. 
Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | -| --scrape.name-escaping-scheme | Method for escaping legacy invalid names when sending to Prometheus that does not support UTF-8. Can be one of "values", "underscores", or "dots". | `values` | -| --enable-feature ... | Comma separated feature names to enable. Valid options: agent, auto-gomaxprocs, auto-gomemlimit, concurrent-rule-eval, created-timestamp-zero-ingestion, delayed-compaction, exemplar-storage, expand-external-labels, extra-scrape-metrics, memory-snapshot-on-shutdown, native-histograms, new-service-discovery-manager, no-default-scrape-port, otlp-write-receiver, promql-experimental-functions, promql-delayed-name-removal, promql-per-step-stats, remote-write-receiver (DEPRECATED), utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | +| --enable-feature ... | Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | +| --agent | Run Prometheus in 'Agent mode'. | | | --log.level | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` | | --log.format | Output format of log messages. One of: [logfmt, json] | `logfmt` | diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index e48cede79c1..5e2a8f6bb1d 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -15,7 +15,7 @@ Tooling for the Prometheus monitoring system. | -h, --help | Show context-sensitive help (also try --help-long and --help-man). | | --version | Show application version. | | --experimental | Enable experimental commands. | -| --enable-feature ... | Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details. | +| --enable-feature ... | Comma separated feature names to enable. Currently unused. | @@ -462,6 +462,7 @@ Unit tests for rules. | Flag | Description | Default | | --- | --- | --- | | --run ... | If set, will only run test groups whose names match the regular expression. Can be specified multiple times. | | +| --debug | Enable unit test debugging. | `false` | | --diff | [Experimental] Print colored differential output between expected & received output. | `false` | diff --git a/docs/configuration/alerting_rules.md b/docs/configuration/alerting_rules.md index 3c1ec84f0f6..cd33dba8e3e 100644 --- a/docs/configuration/alerting_rules.md +++ b/docs/configuration/alerting_rules.md @@ -21,10 +21,13 @@ An example rules file with an alert would be: ```yaml groups: - name: example + labels: + team: myteam rules: - alert: HighRequestLatency expr: job:request_latency_seconds:mean5m{job="myjob"} > 0.5 for: 10m + keep_firing_for: 5m labels: severity: page annotations: @@ -38,6 +41,13 @@ the alert continues to be active during each evaluation for 10 minutes before firing the alert. Elements that are active, but not firing yet, are in the pending state. Alerting rules without the `for` clause will become active on the first evaluation. 
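+For illustration, a minimal sketch of how the `for` clause behaves in practice
+(the alert name, expression, and threshold below are hypothetical and only
+serve as an example):
+
+```yaml
+groups:
+  - name: example-timing
+    rules:
+      - alert: HighErrorRate                                # hypothetical alert name
+        expr: job:request_errors:rate5m{job="myjob"} > 0.05 # hypothetical expression
+        # The alert stays "pending" while the condition holds, and only
+        # transitions to "firing" once it has held for a full 10 minutes.
+        for: 10m
+```
+
+With the default 1m evaluation interval, the condition must hold for roughly ten
+consecutive evaluations before the alert fires; if it stops holding earlier, the
+alert goes back to inactive and the `for` timer starts over.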
+There is also an optional `keep_firing_for` clause that tells Prometheus to keep +this alert firing for the specified duration after the firing condition was last met. +This can be used to prevent situations such as flapping alerts, false resolutions +due to lack of data, etc. Alerting rules without the `keep_firing_for` clause +will deactivate on the first evaluation where the condition is not met (assuming +any optional `for` duration described above has been satisfied). + The `labels` clause allows specifying a set of additional labels to be attached to the alert. Any existing conflicting labels will be overwritten. The label values can be templated. diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 1f2db6ee82e..2d1e4b1801b 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -71,12 +71,19 @@ global: # How frequently to evaluate rules. [ evaluation_interval: | default = 1m ] - # Offset the rule evaluation timestamp of this particular group by the specified duration into the past to ensure the underlying metrics have been received. - # Metric availability delays are more likely to occur when Prometheus is running as a remote write target, but can also occur when there's anomalies with scraping. + # Offset the rule evaluation timestamp of this particular group by the + # specified duration into the past to ensure the underlying metrics have + # been received. Metric availability delays are more likely to occur when + # Prometheus is running as a remote write target, but can also occur when + # there are anomalies with scraping. [ rule_query_offset: | default = 0s ] # The labels to add to any time series or alerts when communicating with - # external systems (federation, remote storage, Alertmanager). + # external systems (federation, remote storage, Alertmanager). + # Environment variable references `${var}` or `$var` are replaced according + # to the values of the current environment variables. + # References to undefined variables are replaced by the empty string. + # The `$` character can be escaped by using `$$`. external_labels: [ : ... ] @@ -94,27 +101,29 @@ global: # change or be removed in the future. [ body_size_limit: | default = 0 ] - # Per-scrape limit on number of scraped samples that will be accepted. + # Per-scrape limit on the number of scraped samples that will be accepted. # If more than this number of samples are present after metric relabeling # the entire scrape will be treated as failed. 0 means no limit. [ sample_limit: | default = 0 ] - # Per-scrape limit on number of labels that will be accepted for a sample. If - # more than this number of labels are present post metric-relabeling, the - # entire scrape will be treated as failed. 0 means no limit. + # Limit on the number of labels that will be accepted per sample. If more + # than this number of labels are present on any sample post metric-relabeling, + # the entire scrape will be treated as failed. 0 means no limit. [ label_limit: | default = 0 ] - # Per-scrape limit on length of labels name that will be accepted for a sample. - # If a label name is longer than this number post metric-relabeling, the entire - # scrape will be treated as failed. 0 means no limit. + # Limit on the length (in bytes) of each individual label name. If any label + # name in a scrape is longer than this number post metric-relabeling, the + # entire scrape will be treated as failed.
Note that label names are UTF-8 + # encoded, and characters can take up to 4 bytes. 0 means no limit. [ label_name_length_limit: | default = 0 ] - # Per-scrape limit on length of labels value that will be accepted for a sample. - # If a label value is longer than this number post metric-relabeling, the - # entire scrape will be treated as failed. 0 means no limit. + # Limit on the length (in bytes) of each individual label value. If any label + # value in a scrape is longer than this number post metric-relabeling, the + # entire scrape will be treated as failed. Note that label values are UTF-8 + # encoded, and characters can take up to 4 bytes. 0 means no limit. [ label_value_length_limit: | default = 0 ] - # Per-scrape config limit on number of unique targets that will be + # Limit per scrape config on number of unique targets that will be # accepted. If more than this number of targets are present after target # relabeling, Prometheus will mark the targets as failed without scraping them. # 0 means no limit. This is an experimental feature, this behaviour could @@ -126,9 +135,9 @@ global: [ keep_dropped_targets: | default = 0 ] # Specifies the validation scheme for metric and label names. Either blank or - # "legacy" for letters, numbers, colons, and underscores; or "utf8" for full - # UTF-8 support. - [ metric_name_validation_scheme | default "legacy" ] + # "utf8" for full UTF-8 support, or "legacy" for letters, numbers, colons, + # and underscores. + [ metric_name_validation_scheme | default "utf8" ] runtime: # Configure the Go garbage collector GOGC parameter @@ -162,8 +171,17 @@ remote_write: [ - ... ] # Settings related to the OTLP receiver feature. +# See https://prometheus.io/docs/guides/opentelemetry/ for best practices. otlp: [ promote_resource_attributes: [, ...] | default = [ ] ] + # Configures translation of OTLP metrics when received through the OTLP metrics + # endpoint. Available values: + # - "UnderscoreEscapingWithSuffixes" refers to commonly agreed normalization used + # by OpenTelemetry in https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/translator/prometheus + # - "NoUTF8EscapingWithSuffixes" is a mode that relies on UTF-8 support in Prometheus. + # It preserves all special characters like dots, but it still adds required suffixes + # for units and _total like in UnderscoreEscapingWithSuffixes. + [ translation_strategy: | default = "UnderscoreEscapingWithSuffixes" ] # Settings related to the remote read feature. remote_read: @@ -203,12 +221,18 @@ job_name: # The protocols to negotiate during a scrape with the client. # Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1, -# OpenMetricsText1.0.0, PrometheusText0.0.4. +# OpenMetricsText1.0.0, PrometheusText0.0.4, PrometheusText1.0.0. [ scrape_protocols: [, ...] | default = ] -# Whether to scrape a classic histogram that is also exposed as a native +# Fallback protocol to use if a scrape returns blank, unparseable, or otherwise +# invalid Content-Type. +# Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1, +# OpenMetricsText1.0.0, PrometheusText0.0.4, PrometheusText1.0.0. +[ fallback_scrape_protocol: ] + +# Whether to scrape a classic histogram, even if it is also exposed as a native # histogram (has no effect without --enable-feature=native-histograms). -[ scrape_classic_histograms: | default = false ] +[ always_scrape_classic_histograms: | default = false ] # The HTTP resource path on which to fetch metrics from targets.
[ metrics_path: | default = /metrics ] @@ -264,69 +288,14 @@ params: # response from the scraped target. [ enable_compression: | default = true ] -# Sets the `Authorization` header on every scrape request with the -# configured username and password. -# password and password_file are mutually exclusive. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Sets the `Authorization` header on every scrape request with -# the configured credentials. -authorization: - # Sets the authentication type of the request. - [ type: | default: Bearer ] - # Sets the credentials of the request. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials of the request with the credentials read from the - # configured file. It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Configure whether scrape requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# Configures the scrape request's TLS settings. -tls_config: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Custom HTTP headers to be sent along with each request. -# Headers that are set by Prometheus itself can't be overwritten. -http_headers: - # Header name. - [ : - # Header values. - [ values: [, ...] ] - # Headers values. Hidden in configuration page. - [ secrets: [, ...] ] - # Files to read header values from. - [ files: [, ...] ] ] - # File to which scrape failures are logged. # Reloading the configuration will reopen the file. [ scrape_failure_log_file: ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] + # List of Azure service discovery configurations. azure_sd_configs: [ - ... ] @@ -454,41 +423,43 @@ metric_relabel_configs: # change or be removed in the future. [ body_size_limit: | default = 0 ] -# Per-scrape limit on number of scraped samples that will be accepted. +# Per-scrape limit on the number of scraped samples that will be accepted. # If more than this number of samples are present after metric relabeling # the entire scrape will be treated as failed. 0 means no limit. [ sample_limit: | default = 0 ] -# Per-scrape limit on number of labels that will be accepted for a sample. If -# more than this number of labels are present post metric-relabeling, the -# entire scrape will be treated as failed. 0 means no limit. +# Limit on the number of labels that will be accepted per sample. If more +# than this number of labels are present on any sample post metric-relabeling, +# the entire scrape will be treated as failed. 0 means no limit. [ label_limit: | default = 0 ] -# Per-scrape limit on length of labels name that will be accepted for a sample. -# If a label name is longer than this number post metric-relabeling, the entire -# scrape will be treated as failed. 0 means no limit. 
+# Limit on the length (in bytes) of each individual label name. If any label +# name in a scrape is longer than this number post metric-relabeling, the +# entire scrape will be treated as failed. Note that label names are UTF-8 +# encoded, and characters can take up to 4 bytes. 0 means no limit. [ label_name_length_limit: | default = 0 ] -# Per-scrape limit on length of labels value that will be accepted for a sample. -# If a label value is longer than this number post metric-relabeling, the -# entire scrape will be treated as failed. 0 means no limit. +# Limit on the length (in bytes) of each individual label value. If any label +# value in a scrape is longer than this number post metric-relabeling, the +# entire scrape will be treated as failed. Note that label values are UTF-8 +# encoded, and characters can take up to 4 bytes. 0 means no limit. [ label_value_length_limit: | default = 0 ] -# Per-scrape config limit on number of unique targets that will be +# Limit per scrape config on number of unique targets that will be # accepted. If more than this number of targets are present after target # relabeling, Prometheus will mark the targets as failed without scraping them. # 0 means no limit. This is an experimental feature, this behaviour could # change in the future. [ target_limit: | default = 0 ] -# Per-job limit on the number of targets dropped by relabeling +# Limit per scrape config on the number of targets dropped by relabeling # that will be kept in memory. 0 means no limit. [ keep_dropped_targets: | default = 0 ] -# Specifies the validation scheme for metric and label names. Either blank or -# "legacy" for letters, numbers, colons, and underscores; or "utf8" for full -# UTF-8 support. -[ metric_name_validation_scheme | default "legacy" ] +# Specifies the validation scheme for metric and label names. Either blank or +# "utf8" for full UTF-8 support, or "legacy" for letters, numbers, colons, and +# underscores. +[ metric_name_validation_scheme | default "utf8" ] # Limit on total number of positive and negative buckets allowed in a single # native histogram. The resolution of a histogram with more buckets will be @@ -540,6 +511,73 @@ metric_relabel_configs: Where `` must be unique across all scrape configurations. +### `` + +A `http_config` allows configuring HTTP requests. + +``` +# Sets the `Authorization` header on every request with the +# configured username and password. +# username and username_file are mutually exclusive. +# password and password_file are mutually exclusive. +basic_auth: + [ username: ] + [ username_file: ] + [ password: ] + [ password_file: ] + +# Sets the `Authorization` header on every request with +# the configured credentials. +authorization: + # Sets the authentication type of the request. + [ type: | default: Bearer ] + # Sets the credentials of the request. It is mutually exclusive with + # `credentials_file`. + [ credentials: ] + # Sets the credentials of the request with the credentials read from the + # configured file. It is mutually exclusive with `credentials`. + [ credentials_file: ] + +# Optional OAuth 2.0 configuration. +# Cannot be used at the same time as basic_auth or authorization. +oauth2: + [ ] + +# Configure whether requests follow HTTP 3xx redirects. +[ follow_redirects: | default = true ] + +# Whether to enable HTTP2. +[ enable_http2: | default: true ] + +# Configures the request's TLS settings. +tls_config: + [ ] + +# Optional proxy URL. 
+[ proxy_url: ] +# Comma-separated string that can contain IPs, CIDR notation, domain names +# that should be excluded from proxying. IP and domain names can +# contain port numbers. +[ no_proxy: ] +# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) +[ proxy_from_environment: | default: false ] +# Specifies headers to send to proxies during CONNECT requests. +[ proxy_connect_header: + [ : [, ...] ] ] + +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] +``` + ### `` A `tls_config` allows configuring TLS connections. @@ -681,65 +719,9 @@ subscription_id: # instead be specified in the relabeling rule. [ port: | default = 80 ] -# Authentication information used to authenticate to the Azure API. -# Note that `basic_auth`, `authorization` and `oauth2` options are -# mutually exclusive. -# `password` and `password_file` are mutually exclusive. - -# Optional HTTP basic authentication information, currently not support by Azure. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration, currently not supported by Azure. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration, currently not supported by Azure. -oauth2: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Custom HTTP headers to be sent along with each request. -# Headers that are set by Prometheus itself can't be overwritten. -http_headers: - # Header name. - [ : - # Header values. - [ values: [, ...] ] - # Headers values. Hidden in configuration page. - [ secrets: [, ...] ] - # Files to read header values from. - [ files: [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. -tls_config: - [ ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` ### `` @@ -785,14 +767,17 @@ The following meta labels are available on targets during [relabeling](#relabel_ services: [ - ] -# See https://www.consul.io/api/catalog.html#list-nodes-for-service to know more -# about the possible filters that can be used. +# A Consul Filter expression used to filter the catalog results +# See https://www.consul.io/api-docs/catalog#list-services to know more +# about the filter expressions that can be used. 
+[ filter: ] +# The `tags` and `node_meta` fields are deprecated in Consul in favor of `filter`. # An optional list of tags used to filter nodes for a given service. Services must contain all tags in the list. tags: [ - ] -# Node metadata key/value pairs to filter nodes for a given service. +# Node metadata key/value pairs to filter nodes for a given service. As of Consul 1.14, consider `filter` instead. [ node_meta: [ : ... ] ] @@ -806,65 +791,9 @@ tags: # On large setup it might be a good idea to increase this value because the catalog will change all the time. [ refresh_interval: | default = 30s ] -# Authentication information used to authenticate to the consul server. -# Note that `basic_auth`, `authorization` and `oauth2` options are -# mutually exclusive. -# `password` and `password_file` are mutually exclusive. - -# Optional HTTP basic authentication information. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -oauth2: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Custom HTTP headers to be sent along with each request. -# Headers that are set by Prometheus itself can't be overwritten. -http_headers: - # Header name. - [ : - # Header values. - [ values: [, ...] ] - # Headers values. Hidden in configuration page. - [ secrets: [, ...] ] - # Files to read header values from. - [ files: [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. -tls_config: - [ ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` Note that the IP number and port used to scrape the targets is assembled as @@ -904,77 +833,20 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_digitalocean_vpc`: the id of the droplet's VPC ```yaml -# Authentication information used to authenticate to the API server. -# Note that `basic_auth` and `authorization` options are -# mutually exclusive. -# password and password_file are mutually exclusive. +# The port to scrape metrics from. +[ port: | default = 80 ] -# Optional HTTP basic authentication information, not currently supported by DigitalOcean. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] +# The time after which the droplets are refreshed. +[ refresh_interval: | default = 60s ] -# Optional `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. 
It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] +``` -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] +### `` -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Custom HTTP headers to be sent along with each request. -# Headers that are set by Prometheus itself can't be overwritten. -http_headers: - # Header name. - [ : - # Header values. - [ values: [, ...] ] - # Headers values. Hidden in configuration page. - [ secrets: [, ...] ] - # Files to read header values from. - [ files: [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. -tls_config: - [ ] - -# The port to scrape metrics from. -[ port: | default = 80 ] - -# The time after which the droplets are refreshed. -[ refresh_interval: | default = 60s ] -``` - -### `` - -Docker SD configurations allow retrieving scrape targets from [Docker Engine](https://docs.docker.com/engine/) hosts. +Docker SD configurations allow retrieving scrape targets from [Docker Engine](https://docs.docker.com/engine/) hosts. This SD discovers "containers" and will create a target for each network IP and port the container is configured to expose. @@ -1001,34 +873,6 @@ See below for the configuration options for Docker discovery: # Address of the Docker daemon. host: -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Custom HTTP headers to be sent along with each request. -# Headers that are set by Prometheus itself can't be overwritten. -http_headers: - # Header name. - [ : - # Header values. - [ values: [, ...] ] - # Headers values. Hidden in configuration page. - [ secrets: [, ...] ] - # Files to read header values from. - [ files: [, ...] ] ] - -# TLS configuration. -tls_config: - [ ] - # The port to scrape metrics from, when `role` is nodes, and for discovered # tasks and services that don't have published ports. [ port: | default = 80 ] @@ -1052,39 +896,9 @@ tls_config: # The time after which the containers are refreshed. [ refresh_interval: | default = 60s ] -# Authentication information used to authenticate to the Docker daemon. -# Note that `basic_auth` and `authorization` options are -# mutually exclusive. 
-# password and password_file are mutually exclusive. - -# Optional HTTP basic authentication information. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` The [relabeling phase](#relabel_config) is the preferred and more powerful @@ -1193,34 +1007,6 @@ See below for the configuration options for Docker Swarm discovery: # Address of the Docker daemon. host: -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Custom HTTP headers to be sent along with each request. -# Headers that are set by Prometheus itself can't be overwritten. -http_headers: - # Header name. - [ : - # Header values. - [ values: [, ...] ] - # Headers values. Hidden in configuration page. - [ secrets: [, ...] ] - # Files to read header values from. - [ files: [, ...] ] ] - -# TLS configuration. -tls_config: - [ ] - # Role of the targets to retrieve. Must be `services`, `tasks`, or `nodes`. role: @@ -1241,39 +1027,9 @@ role: # The time after which the service discovery data is refreshed. [ refresh_interval: | default = 60s ] -# Authentication information used to authenticate to the Docker daemon. -# Note that `basic_auth` and `authorization` options are -# mutually exclusive. -# password and password_file are mutually exclusive. - -# Optional HTTP basic authentication information. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. 
+[ ] ``` The [relabeling phase](#relabel_config) is the preferred and more powerful @@ -1388,65 +1144,9 @@ filters: [ - name: values: , [...] ] -# Authentication information used to authenticate to the EC2 API. -# Note that `basic_auth`, `authorization` and `oauth2` options are -# mutually exclusive. -# `password` and `password_file` are mutually exclusive. - -# Optional HTTP basic authentication information, currently not supported by AWS. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration, currently not supported by AWS. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutuall exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration, currently not supported by AWS. -oauth2: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Custom HTTP headers to be sent along with each request. -# Headers that are set by Prometheus itself can't be overwritten. -http_headers: - # Header name. - [ : - # Header values. - [ values: [, ...] ] - # Headers values. Hidden in configuration page. - [ secrets: [, ...] ] - # Files to read header values from. - [ files: [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. -tls_config: - [ ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` The [relabeling phase](#relabel_config) is the preferred and more powerful @@ -1675,63 +1375,9 @@ query: # The port to scrape metrics from. [ port: | default = 80 ] -# TLS configuration to connect to the PuppetDB. -tls_config: - [ ] - -# basic_auth, authorization, and oauth2, are mutually exclusive. - -# Optional HTTP basic authentication information. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# `Authorization` HTTP header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials with the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. 
-[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Custom HTTP headers to be sent along with each request. -# Headers that are set by Prometheus itself can't be overwritten. -http_headers: - # Header name. - [ : - # Header values. - [ values: [, ...] ] - # Headers values. Hidden in configuration page. - [ secrets: [, ...] ] - # Files to read header values from. - [ files: [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` See [this example Prometheus configuration file](/documentation/examples/prometheus-puppetdb.yml) @@ -1912,80 +1558,21 @@ The labels below are only available for targets with `role` set to `robot`: # One of robot or hcloud. role: -# Authentication information used to authenticate to the API server. -# Note that `basic_auth` and `authorization` options are -# mutually exclusive. -# password and password_file are mutually exclusive. +# The port to scrape metrics from. +[ port: | default = 80 ] -# Optional HTTP basic authentication information, required when role is robot -# Role hcloud does not support basic auth. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] +# The time after which the servers are refreshed. +[ refresh_interval: | default = 60s ] -# Optional `Authorization` header configuration, required when role is -# hcloud. Role robot does not support bearer token authentication. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] +``` -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] +### `` -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Custom HTTP headers to be sent along with each request. -# Headers that are set by Prometheus itself can't be overwritten. -http_headers: - # Header name. - [ : - # Header values. - [ values: [, ...] ] - # Headers values. Hidden in configuration page. - [ secrets: [, ...] ] - # Files to read header values from. - [ files: [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. 
-tls_config: - [ ] - -# The port to scrape metrics from. -[ port: | default = 80 ] - -# The time after which the servers are refreshed. -[ refresh_interval: | default = 60s ] -``` - -### `` - -HTTP-based service discovery provides a more generic way to configure static targets -and serves as an interface to plug in custom service discovery mechanisms. +HTTP-based service discovery provides a more generic way to configure static targets +and serves as an interface to plug in custom service discovery mechanisms. It fetches targets from an HTTP endpoint containing a list of zero or more ``s. The target must reply with an HTTP 200 response. @@ -2021,65 +1608,9 @@ url: # Refresh interval to re-query the endpoint. [ refresh_interval: | default = 60s ] -# Authentication information used to authenticate to the API server. -# Note that `basic_auth`, `authorization` and `oauth2` options are -# mutually exclusive. -# `password` and `password_file` are mutually exclusive. - -# Optional HTTP basic authentication information. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -oauth2: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Custom HTTP headers to be sent along with each request. -# Headers that are set by Prometheus itself can't be overwritten. -http_headers: - # Header name. - [ : - # Header values. - [ values: [, ...] ] - # Headers values. Hidden in configuration page. - [ secrets: [, ...] ] - # Files to read header values from. - [ files: [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. -tls_config: - [ ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` ### `` @@ -2113,74 +1644,15 @@ following meta labels are available on all targets during # The unique ID of the data center. datacenter_id: -# Authentication information used to authenticate to the API server. -# Note that `basic_auth` and `authorization` options are -# mutually exclusive. -# password and password_file are mutually exclusive. - -# Optional HTTP basic authentication information, required when using IONOS -# Cloud username and password as authentication method. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration, required when using IONOS -# Cloud token as authentication method. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. 
It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Custom HTTP headers to be sent along with each request. -# Headers that are set by Prometheus itself can't be overwritten. -http_headers: - # Header name. - [ : - # Header values. - [ values: [, ...] ] - # Headers values. Hidden in configuration page. - [ secrets: [, ...] ] - # Files to read header values from. - [ files: [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. -tls_config: - [ ] - # The port to scrape metrics from. [ port: | default = 80 ] # The time after which the servers are refreshed. [ refresh_interval: | default = 60s ] + +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` ### `` @@ -2298,6 +1770,8 @@ The `endpointslice` role discovers targets from existing endpointslices. For eac address referenced in the endpointslice object one target is discovered. If the endpoint is backed by a pod, all additional container ports of the pod, not bound to an endpoint port, are discovered as targets as well. +The role requires the `discovery.k8s.io/v1` API version (available since Kubernetes v1.21). + Available meta labels: * `__meta_kubernetes_namespace`: The namespace of the endpoints object. @@ -2318,7 +1792,7 @@ Available meta labels: * `__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname`: Flag that shows if the referenced object has a kubernetes.io/hostname annotation. * `__meta_kubernetes_endpointslice_endpoint_hostname`: Hostname of the referenced endpoint. * `__meta_kubernetes_endpointslice_endpoint_node_name`: Name of the Node hosting the referenced endpoint. - * `__meta_kubernetes_endpointslice_endpoint_zone`: Zone the referenced endpoint exists in (only available when using the `discovery.k8s.io/v1` API group). + * `__meta_kubernetes_endpointslice_endpoint_zone`: Zone the referenced endpoint exists in. * `__meta_kubernetes_endpointslice_port`: Port of the referenced endpoint. * `__meta_kubernetes_endpointslice_port_name`: Named port of the referenced endpoint. * `__meta_kubernetes_endpointslice_port_protocol`: Protocol of the referenced endpoint. @@ -2331,6 +1805,8 @@ The `ingress` role discovers a target for each path of each ingress. This is generally useful for blackbox monitoring of an ingress. The address will be set to the host specified in the ingress spec. +The role requires the `networking.k8s.io/v1` API version (available since Kubernetes v1.19). + Available meta labels: * `__meta_kubernetes_namespace`: The namespace of the ingress object. 
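As an illustration of the `endpointslice` and `ingress` roles described above, here is a minimal sketch of a scrape configuration using the `endpointslice` role (the job name and namespace selector are hypothetical):

```yaml
scrape_configs:
  - job_name: "kubernetes-endpointslices"  # hypothetical job name
    kubernetes_sd_configs:
      - role: endpointslice                # requires discovery.k8s.io/v1 (Kubernetes v1.21+)
        namespaces:
          names:
            - default                      # hypothetical namespace filter
```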
@@ -2362,66 +1838,6 @@ role: # Note that api_server and kube_config are mutually exclusive. [ kubeconfig_file: ] -# Optional authentication information used to authenticate to the API server. -# Note that `basic_auth` and `authorization` options are mutually exclusive. -# password and password_file are mutually exclusive. - -# Optional HTTP basic authentication information. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Custom HTTP headers to be sent along with each request. -# Headers that are set by Prometheus itself can't be overwritten. -http_headers: - # Header name. - [ : - # Header values. - [ values: [, ...] ] - # Headers values. Hidden in configuration page. - [ secrets: [, ...] ] - # Files to read header values from. - [ files: [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. -tls_config: - [ ] - # Optional namespace discovery. If omitted, all namespaces are used. namespaces: own_namespace: @@ -2451,6 +1867,10 @@ attach_metadata: # Attaches node metadata to discovered targets. Valid for roles: pod, endpoints, endpointslice. # When set to true, Prometheus must have permissions to get Nodes. [ node: | default = false ] + +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` See [this example Prometheus configuration file](/documentation/examples/prometheus-kubernetes.yml) @@ -2491,66 +1911,9 @@ server: # The time after which the monitoring assignments are refreshed. [ fetch_timeout: | default = 2m ] -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Custom HTTP headers to be sent along with each request. -# Headers that are set by Prometheus itself can't be overwritten. -http_headers: - # Header name. - [ : - # Header values. - [ values: [, ...] ] - # Headers values. Hidden in configuration page. - [ secrets: [, ...] ] - # Files to read header values from. - [ files: [, ...] 
] ] - -# TLS configuration. -tls_config: - [ ] - -# Authentication information used to authenticate to the Docker daemon. -# Note that `basic_auth` and `authorization` options are -# mutually exclusive. -# password and password_file are mutually exclusive. - -# Optional HTTP basic authentication information. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional the `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials with the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` The [relabeling phase](#relabel_config) is the preferred and more powerful way @@ -2592,77 +1955,21 @@ See below for the configuration options for Lightsail discovery: [ access_key: ] [ secret_key: ] # Named AWS profile used to connect to the API. -[ profile: ] - -# AWS Role ARN, an alternative to using AWS API keys. -[ role_arn: ] - -# Refresh interval to re-read the instance list. -[ refresh_interval: | default = 60s ] - -# The port to scrape metrics from. If using the public IP address, this must -# instead be specified in the relabeling rule. -[ port: | default = 80 ] - -# Authentication information used to authenticate to the Lightsail API. -# Note that `basic_auth`, `authorization` and `oauth2` options are -# mutually exclusive. -# `password` and `password_file` are mutually exclusive. - -# Optional HTTP basic authentication information, currently not supported by AWS. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration, currently not supported by AWS. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutuall exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration, currently not supported by AWS. -oauth2: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Custom HTTP headers to be sent along with each request. -# Headers that are set by Prometheus itself can't be overwritten. -http_headers: - # Header name. - [ : - # Header values. - [ values: [, ...] ] - # Headers values. Hidden in configuration page. - [ secrets: [, ...] ] - # Files to read header values from. - [ files: [, ...] 
] ] +[ profile: ] -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] +# AWS Role ARN, an alternative to using AWS API keys. +[ role_arn: ] -# Whether to enable HTTP2. -[ enable_http2: | default: true ] +# Refresh interval to re-read the instance list. +[ refresh_interval: | default = 60s ] -# TLS configuration. -tls_config: - [ ] +# The port to scrape metrics from. If using the public IP address, this must +# instead be specified in the relabeling rule. +[ port: | default = 80 ] + +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` ### `` @@ -2673,6 +1980,8 @@ This service discovery uses the public IPv4 address by default, by that can be changed with relabeling, as demonstrated in [the Prometheus linode-sd configuration file](/documentation/examples/prometheus-linode.yml). +Linode APIv4 Token must be created with scopes: `linodes:read_only`, `ips:read_only`, and `events:read_only`. + The following meta labels are available on targets during [relabeling](#relabel_config): * `__meta_linode_instance_id`: the id of the linode instance @@ -2700,71 +2009,10 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_linode_ipv6_ranges`: a list of IPv6 ranges with mask assigned to the linode instance joined by the tag separator ```yaml -# Authentication information used to authenticate to the API server. -# Note that `basic_auth` and `authorization` options are -# mutually exclusive. -# password and password_file are mutually exclusive. -# Note: Linode APIv4 Token must be created with scopes: 'linodes:read_only', 'ips:read_only', and 'events:read_only' - -# Optional HTTP basic authentication information, not currently supported by Linode APIv4. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional the `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials with the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] # Optional region to filter on. [ region: ] -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Custom HTTP headers to be sent along with each request. -# Headers that are set by Prometheus itself can't be overwritten. -http_headers: - # Header name. - [ : - # Header values. - [ values: [, ...] ] - # Headers values. Hidden in configuration page. - [ secrets: [, ...] ] - # Files to read header values from. - [ files: [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. 
-tls_config: - [ ] - # The port to scrape metrics from. [ port: | default = 80 ] @@ -2773,6 +2021,10 @@ tls_config: # The time after which the linode instances are refreshed. [ refresh_interval: | default = 60s ] + +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` ### `` @@ -2813,67 +2065,9 @@ servers: # It is mutually exclusive with `auth_token` and other authentication mechanisms. [ auth_token_file: ] -# Sets the `Authorization` header on every request with the -# configured username and password. -# This is mutually exclusive with other authentication mechanisms. -# password and password_file are mutually exclusive. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -# NOTE: The current version of DC/OS marathon (v1.11.0) does not support -# standard `Authentication` header, use `auth_token` or `auth_token_file` -# instead. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration for connecting to marathon servers -tls_config: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Custom HTTP headers to be sent along with each request. -# Headers that are set by Prometheus itself can't be overwritten. -http_headers: - # Header name. - [ : - # Header values. - [ values: [, ...] ] - # Headers values. Hidden in configuration page. - [ secrets: [, ...] ] - # Files to read header values from. - [ files: [, ...] ] ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` By default every app listed in Marathon will be scraped by Prometheus. If not all @@ -2935,65 +2129,9 @@ The following meta labels are available on targets during [relabeling](#relabel_ [ server: ] [ tag_separator: | default = ,] -# Authentication information used to authenticate to the nomad server. -# Note that `basic_auth`, `authorization` and `oauth2` options are -# mutually exclusive. -# `password` and `password_file` are mutually exclusive. - -# Optional HTTP basic authentication information. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. 
- [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -oauth2: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Custom HTTP headers to be sent along with each request. -# Headers that are set by Prometheus itself can't be overwritten. -http_headers: - # Header name. - [ : - # Header values. - [ values: [, ...] ] - # Headers values. Hidden in configuration page. - [ secrets: [, ...] ] - # Files to read header values from. - [ files: [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. -tls_config: - [ ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` ### `` @@ -3131,66 +2269,12 @@ See below for the configuration options for Eureka discovery: # The URL to connect to the Eureka server. server: -# Sets the `Authorization` header on every request with the -# configured username and password. -# password and password_file are mutually exclusive. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Configures the scrape request's TLS settings. -tls_config: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Custom HTTP headers to be sent along with each request. -# Headers that are set by Prometheus itself can't be overwritten. -http_headers: - # Header name. - [ : - # Header values. - [ values: [, ...] ] - # Headers values. Hidden in configuration page. - [ secrets: [, ...] ] - # Files to read header values from. - [ files: [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - # Refresh interval to re-read the app instance list. 
[ refresh_interval: | default = 30s ] + +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` See [the Prometheus eureka-sd configuration file](/documentation/examples/prometheus-eureka.yml) @@ -3281,49 +2365,19 @@ role: # Zone is the availability zone of your targets (e.g. fr-par-1). [ zone: | default = fr-par-1 ] -# NameFilter specify a name filter (works as a LIKE) to apply on the server listing request. -[ name_filter: ] - -# TagsFilter specify a tag filter (a server needs to have all defined tags to be listed) to apply on the server listing request. -tags_filter: -[ - ] - -# Refresh interval to re-read the targets list. -[ refresh_interval: | default = 60s ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Custom HTTP headers to be sent along with each request. -# Headers that are set by Prometheus itself can't be overwritten. -http_headers: - # Header name. - [ : - # Header values. - [ values: [, ...] ] - # Headers values. Hidden in configuration page. - [ secrets: [, ...] ] - # Files to read header values from. - [ files: [, ...] ] ] - -# TLS configuration. -tls_config: - [ ] +# NameFilter specify a name filter (works as a LIKE) to apply on the server listing request. +[ name_filter: ] + +# TagsFilter specify a tag filter (a server needs to have all defined tags to be listed) to apply on the server listing request. +tags_filter: +[ - ] + +# Refresh interval to re-read the targets list. +[ refresh_interval: | default = 60s ] + +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` ### `` @@ -3363,61 +2417,9 @@ password: # Refresh interval to re-read the managed targets list. [ refresh_interval: | default = 60s ] -# Optional HTTP basic authentication information, currently not supported by Uyuni. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration, currently not supported by Uyuni. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration, currently not supported by Uyuni. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. 
-[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Custom HTTP headers to be sent along with each request. -# Headers that are set by Prometheus itself can't be overwritten. -http_headers: - # Header name. - [ : - # Header values. - [ values: [, ...] ] - # Headers values. Hidden in configuration page. - [ secrets: [, ...] ] - # Files to read header values from. - [ files: [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. -tls_config: - [ ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` See [the Prometheus uyuni-sd configuration file](/documentation/examples/prometheus-uyuni.yml) @@ -3452,72 +2454,15 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_vultr_instance_allowed_bandwidth_gb` : Monthly bandwidth quota in GB. ```yaml -# Authentication information used to authenticate to the API server. -# Note that `basic_auth` and `authorization` options are -# mutually exclusive. -# password and password_file are mutually exclusive. - -# Optional HTTP basic authentication information, not currently supported by Vultr. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Custom HTTP headers to be sent along with each request. -# Headers that are set by Prometheus itself can't be overwritten. -http_headers: - # Header name. - [ : - # Header values. - [ values: [, ...] ] - # Headers values. Hidden in configuration page. - [ secrets: [, ...] ] - # Files to read header values from. - [ files: [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. -tls_config: - [ ] - # The port to scrape metrics from. [ port: | default = 80 ] # The time after which the instances are refreshed. [ refresh_interval: | default = 60s ] + +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. 
+[ ] ``` @@ -3669,25 +2614,6 @@ through the `__alerts_path__` label. # Configures the protocol scheme used for requests. [ scheme: | default = http ] -# Sets the `Authorization` header on every request with the -# configured username and password. -# password and password_file are mutually exclusive. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - # Optionally configures AWS's Signature Verification 4 signing process to sign requests. # Cannot be set at the same time as basic_auth, authorization, oauth2, azuread or google_iam. # To use the default credentials from the AWS SDK, use `sigv4: {}`. @@ -3707,44 +2633,9 @@ sigv4: # AWS Role ARN, an alternative to using AWS API keys. [ role_arn: ] -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Configures the scrape request's TLS settings. -tls_config: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Custom HTTP headers to be sent along with each request. -# Headers that are set by Prometheus itself can't be overwritten. -http_headers: - # Header name. - [ : - # Header values. - [ values: [, ...] ] - # Headers values. Hidden in configuration page. - [ secrets: [, ...] ] - # Files to read header values from. - [ files: [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] # List of Azure service discovery configurations. azure_sd_configs: @@ -3915,25 +2806,6 @@ write_relabel_configs: # For the `io.prometheus.write.v2.Request` message, this option is noop (always true). [ send_native_histograms: | default = false ] -# Sets the `Authorization` header on every remote write request with the -# configured username and password. -# password and password_file are mutually exclusive. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default = Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - # Optionally configures AWS's Signature Verification 4 signing process to # sign requests. Cannot be set at the same time as basic_auth, authorization, oauth2, or azuread. 
# To use the default credentials from the AWS SDK, use `sigv4: {}`. @@ -3953,11 +2825,6 @@ sigv4: # AWS Role ARN, an alternative to using AWS API keys. [ role_arn: ] -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth, authorization, sigv4, azuread or google_iam. -oauth2: - [ ] - # Optional AzureAD configuration. # Cannot be used at the same time as basic_auth, authorization, oauth2, sigv4 or google_iam. azuread: @@ -3984,43 +2851,9 @@ azuread: # Cannot be used at the same time as basic_auth, authorization, oauth2, sigv4 or azuread. # To use the default credentials from the Google Cloud SDK, use `google_iam: {}`. google_iam: - # Service account key with monitoring write permessions. + # Service account key with monitoring write permissions. credentials_file: -# Configures the remote write request's TLS settings. -tls_config: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default = false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Custom HTTP headers to be sent along with each request. -# Headers that are set by Prometheus itself can't be overwritten. -http_headers: - # Header name. - [ : - # Header values. - [ values: [, ...] ] - # Headers values. Hidden in configuration page. - [ secrets: [, ...] ] - # Files to read header values from. - [ files: [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default = true ] - # Configures the queue used to write to remote storage. queue_config: # Number of samples to buffer per shard before we block reading of more @@ -4062,6 +2895,11 @@ metadata_config: [ send_interval: | default = 1m ] # Maximum number of samples per send. [ max_samples_per_send: | default = 500] + +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +# enable_http2 defaults to false for remote-write. +[ ] ``` There is a list of @@ -4096,66 +2934,12 @@ headers: # the local storage should have complete data for. [ read_recent: | default = false ] -# Sets the `Authorization` header on every remote read request with the -# configured username and password. -# password and password_file are mutually exclusive. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Configures the remote read request's TLS settings. -tls_config: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. 
IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Custom HTTP headers to be sent along with each request. -# Headers that are set by Prometheus itself can't be overwritten. -http_headers: - # Header name. - [ : - # Header values. - [ values: [, ...] ] - # Headers values. Hidden in configuration page. - [ secrets: [, ...] ] - # Files to read header values from. - [ files: [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - # Whether to use the external labels as selectors for the remote read endpoint. [ filter_external_labels: | default = true ] + +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` There is a list of @@ -4166,8 +2950,6 @@ with this feature. `tsdb` lets you configure the runtime-reloadable configuration settings of the TSDB. -NOTE: Out-of-order ingestion is an experimental feature, but you do not need any additional flag to enable it. Setting `out_of_order_time_window` to a positive duration enables it. - ```yaml # Configures how old an out-of-order/out-of-bounds sample can be w.r.t. the TSDB max time. # An out-of-order/out-of-bounds sample is ingested into the TSDB as long as the timestamp diff --git a/docs/configuration/recording_rules.md b/docs/configuration/recording_rules.md index 9aa226bbc0b..9a8e7a70c92 100644 --- a/docs/configuration/recording_rules.md +++ b/docs/configuration/recording_rules.md @@ -89,6 +89,11 @@ name: # Offset the rule evaluation timestamp of this particular group by the specified duration into the past. [ query_offset: | default = global.rule_query_offset ] +# Labels to add or overwrite before storing the result for its rules. +# Labels defined in will override the key if it has a collision. +labels: + [ : ] + rules: [ - ... ] ``` diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 7b07a04d0e2..8c0e319f9c5 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -11,23 +11,6 @@ Their behaviour can change in future releases which will be communicated via the You can enable them using the `--enable-feature` flag with a comma separated list of features. They may be enabled by default in future versions. -## Expand environment variables in external labels - -`--enable-feature=expand-external-labels` - -Replace `${var}` or `$var` in the [`external_labels`](configuration/configuration.md#configuration-file) -values according to the values of the current environment variables. References -to undefined variables are replaced by the empty string. -The `$` character can be escaped by using `$$`. - -## Remote Write Receiver - -`--enable-feature=remote-write-receiver` - -The remote write receiver allows Prometheus to accept remote write requests from other Prometheus servers. More details can be found [here](storage.md#overview). - -Activating the remote write receiver via a feature flag is deprecated. Use `--web.enable-remote-write-receiver` instead. This feature flag will be ignored in future versions of Prometheus. 
- ## Exemplars storage `--enable-feature=exemplar-storage` @@ -40,9 +23,8 @@ Exemplar storage is implemented as a fixed size circular buffer that stores exem `--enable-feature=memory-snapshot-on-shutdown` -This takes the snapshot of the chunks that are in memory along with the series information when shutting down and stores -it on disk. This will reduce the startup time since the memory state can be restored with this snapshot and m-mapped -chunks without the need of WAL replay. +This takes a snapshot of the chunks that are in memory along with the series information when shutting down and stores it on disk. This will reduce the startup time since the memory state can now be restored with this snapshot +and m-mapped chunks, while a WAL replay from disk is only needed for the parts of the WAL that are not part of the snapshot. ## Extra scrape metrics @@ -55,30 +37,6 @@ When enabled, for each instance scrape, Prometheus stores a sample in the follow to find out how close they are to reaching the limit with `scrape_samples_post_metric_relabeling / scrape_sample_limit`. Note that `scrape_sample_limit` can be zero if there is no limit configured, which means that the query above can return `+Inf` for targets with no limit (as we divide by zero). If you want to query only for targets that do have a sample limit use this query: `scrape_samples_post_metric_relabeling / (scrape_sample_limit > 0)`. - `scrape_body_size_bytes`. The uncompressed size of the most recent scrape response, if successful. Scrapes failing because `body_size_limit` is exceeded report `-1`, other scrape failures report `0`. -## New service discovery manager - -`--enable-feature=new-service-discovery-manager` - -When enabled, Prometheus uses a new service discovery manager that does not -restart unchanged discoveries upon reloading. This makes reloads faster and reduces -pressure on service discoveries' sources. - -Users are encouraged to test the new service discovery manager and report any -issues upstream. - -In future releases, this new service discovery manager will become the default and -this feature flag will be ignored. - -## Prometheus agent - -`--enable-feature=agent` - -When enabled, Prometheus runs in agent mode. The agent mode is limited to -discovery, scrape and remote write. - -This is useful when you do not need to query the Prometheus data locally, but -only from a central [remote endpoint](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage). - ## Per-step stats `--enable-feature=promql-per-step-stats` @@ -89,29 +47,6 @@ statistics. Currently this is limited to totalQueryableSamples. When disabled in either the engine or the query, per-step statistics are not computed at all. -## Auto GOMAXPROCS - -`--enable-feature=auto-gomaxprocs` - -When enabled, GOMAXPROCS variable is automatically set to match Linux container CPU quota. - -## Auto GOMEMLIMIT - -`--enable-feature=auto-gomemlimit` - -When enabled, the GOMEMLIMIT variable is automatically set to match the Linux container memory limit. If there is no container limit, or the process is running outside of containers, the system memory total is used. - -There is also an additional tuning flag, `--auto-gomemlimit.ratio`, which allows controlling how much of the memory is used for Prometheus. The remainder is reserved for memory outside the process. For example, kernel page cache. Page cache is important for Prometheus TSDB query performance. The default is `0.9`, which means 90% of the memory limit will be used for Prometheus. 
- -## No default scrape port - -`--enable-feature=no-default-scrape-port` - -When enabled, the default ports for HTTP (`:80`) or HTTPS (`:443`) will _not_ be added to -the address used to scrape a target (the value of the `__address_` label), contrary to the default behavior. -In addition, if a default HTTP or HTTPS port has already been added either in a static configuration or -by a service discovery mechanism and the respective scheme is specified (`http` or `https`), that port will be removed. - ## Native Histograms `--enable-feature=native-histograms` @@ -134,67 +69,7 @@ those classic histograms that do not come with a corresponding native histogram. However, if a native histogram is present, Prometheus will ignore the corresponding classic histogram, with the notable exception of exemplars, which are always ingested. To keep the classic histograms as well, enable -`scrape_classic_histograms` in the scrape job. - -_Note about the format of `le` and `quantile` label values:_ - -In certain situations, the protobuf parsing changes the number formatting of -the `le` labels of classic histograms and the `quantile` labels of -summaries. Typically, this happens if the scraped target is instrumented with -[client_golang](https://github.com/prometheus/client_golang) provided that -[promhttp.HandlerOpts.EnableOpenMetrics](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus/promhttp#HandlerOpts) -is set to `false`. In such a case, integer label values are represented in the -text format as such, e.g. `quantile="1"` or `le="2"`. However, the protobuf parsing -changes the representation to float-like (following the OpenMetrics -specification), so the examples above become `quantile="1.0"` and `le="2.0"` after -ingestion into Prometheus, which changes the identity of the metric compared to -what was ingested before via the text format. - -The effect of this change is that alerts, recording rules and dashboards that -directly reference label values as whole numbers such as `le="1"` will stop -working. - -Aggregation by the `le` and `quantile` labels for vectors that contain the old and -new formatting will lead to unexpected results, and range vectors that span the -transition between the different formatting will contain additional series. -The most common use case for both is the quantile calculation via -`histogram_quantile`, e.g. -`histogram_quantile(0.95, sum by (le) (rate(histogram_bucket[10m])))`. -The `histogram_quantile` function already tries to mitigate the effects to some -extent, but there will be inaccuracies, in particular for shorter ranges that -cover only a few samples. - -Ways to deal with this change either globally or on a per metric basis: - -- Fix references to integer `le`, `quantile` label values, but otherwise do -nothing and accept that some queries that span the transition time will produce -inaccurate or unexpected results. -_This is the recommended solution, to get consistently normalized label values._ -Also Prometheus 3.0 is expected to enforce normalization of these label values. -- Use `metric_relabel_config` to retain the old labels when scraping targets. -This should **only** be applied to metrics that currently produce such labels. 
- - -```yaml - metric_relabel_configs: - - source_labels: - - quantile - target_label: quantile - regex: (\d+)\.0+ - - source_labels: - - le - - __name__ - target_label: le - regex: (\d+)\.0+;.*_bucket -``` - -## OTLP Receiver - -`--enable-feature=otlp-write-receiver` - -The OTLP receiver allows Prometheus to accept [OpenTelemetry](https://opentelemetry.io/) metrics writes. -Prometheus is best used as a Pull based system, and staleness, `up` metric, and other Pull enabled features -won't work when you push OTLP metrics. +`always_scrape_classic_histograms` in the scrape job. ## Experimental PromQL functions @@ -226,6 +101,12 @@ This has the potential to improve rule group evaluation latency and resource uti The number of concurrent rule evaluations can be configured with `--rules.max-concurrent-rule-evals`, which is set to `4` by default. +## Serve old Prometheus UI + +Fall back to serving the old (Prometheus 2.x) web UI instead of the new UI. The new UI that was released as part of Prometheus 3.0 is a complete rewrite and aims to be cleaner, less cluttered, and more modern under the hood. However, it is not fully feature complete and battle-tested yet, so some users may still prefer using the old UI. + +`--enable-feature=old-ui` + ## Metadata WAL Records `--enable-feature=metadata-wal-records` @@ -258,10 +139,15 @@ When enabled, Prometheus will change the way in which the `__name__` label is re This allows optionally preserving the `__name__` label via the `label_replace` and `label_join` functions, and helps prevent the "vector cannot contain metrics with the same labelset" error, which can happen when applying a regex-matcher to the `__name__` label. -## UTF-8 Name Support +## Auto Reload Config + +`--enable-feature=auto-reload-config` -`--enable-feature=utf8-names` +When enabled, Prometheus will automatically reload its configuration file at a +specified interval. The interval is defined by the +`--config.auto-reload-interval` flag, which defaults to `30s`. -When enabled, changes the metric and label name validation scheme inside Prometheus to allow the full UTF-8 character set. -By itself, this flag does not enable the request of UTF-8 names via content negotiation. -Users will also have to set `metric_name_validation_scheme` in scrape configs to enable the feature either on the global config or on a per-scrape config basis. +Configuration reloads are triggered by detecting changes in the checksum of the +main configuration file or any referenced files, such as rule and scrape +configurations. To ensure consistency and avoid issues during reloads, it's +recommended to update these files atomically. diff --git a/docs/migration.md b/docs/migration.md index cb88bbfd6f7..73de5bcaaff 100644 --- a/docs/migration.md +++ b/docs/migration.md @@ -3,198 +3,216 @@ title: Migration sort_rank: 10 --- -# Prometheus 2.0 migration guide +# Prometheus 3.0 migration guide -In line with our [stability promise](https://prometheus.io/blog/2016/07/18/prometheus-1-0-released/#fine-print), -the Prometheus 2.0 release contains a number of backwards incompatible changes. -This document offers guidance on migrating from Prometheus 1.8 to Prometheus 2.0 and newer versions. +In line with our [stability promise](https://prometheus.io/docs/prometheus/latest/stability/), +the Prometheus 3.0 release contains a number of backwards incompatible changes. +This document offers guidance on migrating from Prometheus 2.x to Prometheus 3.0 and newer versions. ## Flags -The format of Prometheus command line flags has changed. 
Instead of a
-single dash, all flags now use a double dash. Common flags (`--config.file`,
-`--web.listen-address` and `--web.external-url`) remain but
-almost all storage-related flags have been removed.
-
-Some notable flags which have been removed:
-
-- `-alertmanager.url` In Prometheus 2.0, the command line flags for configuring
-  a static Alertmanager URL have been removed. Alertmanager must now be
-  discovered via service discovery, see [Alertmanager service discovery](#alertmanager-service-discovery).
+- The following feature flags have been removed and are now part of the
+  default behavior of Prometheus v3:
+  - `promql-at-modifier`
+  - `promql-negative-offset`
+  - `remote-write-receiver`
+  - `new-service-discovery-manager`
+  - `expand-external-labels`
+    - Environment variable references `${var}` or `$var` in external label values
+      are replaced according to the values of the current environment variables.
+    - References to undefined variables are replaced by the empty string.
+      The `$` character can be escaped by using `$$`.
+  - `no-default-scrape-port`
+    - Prometheus v3 will no longer add ports to scrape targets according to the
+      specified scheme. Targets will now appear in labels exactly as configured.
+    - If you rely on scrape targets like
+      `https://example.com/metrics` or `http://example.com/metrics` being
+      represented as `https://example.com/metrics:443` and
+      `http://example.com/metrics:80` respectively, add the ports to your
+      target URLs explicitly.
+  - `agent`
+    - Instead use the dedicated `--agent` CLI flag.
+  - `auto-gomemlimit`
+    - Prometheus v3 will automatically set `GOMEMLIMIT` to match the Linux
+      container memory limit. If there is no container limit, or the process is
+      running outside of containers, the system memory total is used. To disable
+      this, `--no-auto-gomemlimit` is available.
+  - `auto-gomaxprocs`
+    - Prometheus v3 will automatically set `GOMAXPROCS` to match the Linux
+      container CPU quota. To disable this, `--no-auto-gomaxprocs` is available.
+
+  Prometheus v3 will log a warning if you continue to pass these to
+  `--enable-feature`.
+
+## Configuration
+
+- The scrape job level configuration option `scrape_classic_histograms` has been
+  renamed to `always_scrape_classic_histograms`. If you use the
+  `--enable-feature=native-histograms` feature flag to ingest native histograms
+  and you also want to ingest classic histograms that an endpoint might expose
+  along with native histograms, be sure to add this configuration or change your
+  configuration from the old name.
+- The default for `http_config.enable_http2` in `remote_write` items has been
+  changed to `false`. In Prometheus v2 the remote write HTTP client would
+  default to HTTP/2. In order to parallelize multiple remote write queues
+  across multiple sockets, it's preferable not to default to HTTP/2.
+  If you prefer to use HTTP/2 for remote write, you must now set
+  `http_config.enable_http2: true` in your `remote_write` configuration section,
+  as shown in the sketch below.
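+
+  For example, a minimal `remote_write` entry that opts back into HTTP/2 might
+  look like the following sketch (the endpoint URL is a placeholder; the HTTP
+  client options such as `enable_http2` are written inline in each
+  `remote_write` entry):
+
+  ```yaml
+  remote_write:
+    # Hypothetical remote storage endpoint, used purely for illustration.
+    - url: https://remote-storage.example.com/api/v1/write
+      # Restore the Prometheus v2 behavior of negotiating HTTP/2.
+      enable_http2: true
+  ```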
 
-- `-log.format` In Prometheus 2.0 logs can only be streamed to standard error.
-
-- `-query.staleness-delta` has been renamed to `--query.lookback-delta`; Prometheus
-  2.0 introduces a new mechanism for handling staleness, see [staleness](querying/basics.md#staleness).
+## PromQL
 
-- `-storage.local.*` Prometheus 2.0 introduces a new storage engine; as such all
-  flags relating to the old engine have been removed. For information on the
-  new engine, see [Storage](#storage).
+- The `.` pattern in regular expressions in PromQL matches newline characters.
+  With this change a regular expression like `.*` matches strings that include
+  `\n`. This applies to matchers in queries and relabel configs.
+  - For example, the following regular expressions now match the accompanying
+    strings, whereas in Prometheus v2 these combinations didn't match.
+    - `.*` additionally matches `foo\n` and `Foo\nBar`
+    - `foo.?bar` additionally matches `foo\nbar`
+    - `foo.+bar` additionally matches `foo\nbar`
+  - If you want Prometheus v3 to behave like v2, you will have to change your
+    regular expressions by replacing all `.` patterns with `[^\n]`, e.g.
+    `foo[^\n]*`.
+- Lookback and range selectors are left open and right closed (previously left
+  closed and right closed). This change affects queries when the evaluation time
+  perfectly aligns with the sample timestamps. For example, assume querying a
+  time series with evenly spaced samples exactly 1 minute apart. Before Prometheus
+  v3, a range query with `5m` would usually return 5 samples. But if the query
+  evaluation aligns perfectly with a scrape, it would return 6 samples. In
+  Prometheus v3 queries like this will always return 5 samples.
+  This change likely has few effects for everyday use, except for some subquery
+  use cases.
+  Query front-ends that align queries usually align subqueries to multiples of
+  the step size. These subqueries will likely be affected.
+  Tests are more likely to be affected. To fix those, either adjust the expected
+  number of samples or extend the range by less than one sample interval.
+- The `holt_winters` function has been renamed to `double_exponential_smoothing`
+  and is now guarded by the `promql-experimental-functions` feature flag.
+  If you want to keep using `holt_winters`, you have to do both of these things:
+  - Rename `holt_winters` to `double_exponential_smoothing` in your queries.
+  - Pass `--enable-feature=promql-experimental-functions` in your Prometheus
+    CLI invocation.
+
+## Scrape protocols
+
+Prometheus v3 is stricter concerning the Content-Type header received when
+scraping. Prometheus v2 would default to the standard Prometheus text protocol
+if the target being scraped did not specify a Content-Type header or if the
+header was unparsable or unrecognised. This could lead to incorrect data being
+parsed in the scrape. Prometheus v3 will now fail the scrape in such cases.
+
+If a scrape target does not provide the correct Content-Type header, the
+fallback protocol can be specified using the `fallback_scrape_protocol`
+parameter. See the [Prometheus scrape_config documentation](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config).
+
+This is a breaking change, as scrapes that may have succeeded with Prometheus v2
+may now fail if this fallback protocol is not specified.
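+
+For example, a scrape job whose target is known to omit or mangle the
+Content-Type header could fall back to the classic Prometheus text format with
+a configuration like the following sketch (the job name and target address are
+placeholders):
+
+```yaml
+scrape_configs:
+  - job_name: legacy-exporter
+    # Assume the classic text exposition format whenever the Content-Type
+    # header is missing or cannot be parsed.
+    fallback_scrape_protocol: PrometheusText0.0.4
+    static_configs:
+      - targets:
+          - localhost:9100
+```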
 
-- `-storage.remote.*` Prometheus 2.0 has removed the deprecated remote
-  storage flags, and will fail to start if they are supplied. To write to
-  InfluxDB, Graphite, or OpenTSDB use the relevant storage adapter.
+## Miscellaneous
 
-## Alertmanager service discovery
+### TSDB format and downgrade
 
-Alertmanager service discovery was introduced in Prometheus 1.4, allowing Prometheus
-to dynamically discover Alertmanager replicas using the same mechanism as scrape
-targets. In Prometheus 2.0, the command line flags for static Alertmanager config
-have been removed, so the following command line flag:
+The TSDB format has been changed slightly in Prometheus v2.55 in preparation for changes
+to the index format. Consequently, a Prometheus v3 TSDB can only be read by
+Prometheus v2.55 or newer. Keep that in mind when upgrading to v3: you will only
+be able to downgrade to v2.55, not lower, without losing your persistent TSDB data.
 
-```
-./prometheus -alertmanager.url=http://alertmanager:9093/
-```
+As an extra safety measure, you could optionally consider upgrading to v2.55 first and
+confirming that Prometheus works as expected before upgrading to v3.
 
-Would be replaced with the following in the `prometheus.yml` config file:
+### TSDB storage contract
 
-```yaml
-alerting:
-  alertmanagers:
-  - static_configs:
-    - targets:
-      - alertmanager:9093
-```
+TSDB-compatible storage is now expected to return results matching the specified
+selectors. This might impact some third-party implementations, most likely those
+implementing `remote_read`.
 
-You can also use all the usual Prometheus service discovery integrations and
-relabeling in your Alertmanager configuration. This snippet instructs
-Prometheus to search for Kubernetes pods, in the `default` namespace, with the
-label `name: alertmanager` and with a non-empty port.
+This contract is not explicitly enforced, but can cause undefined behavior.
 
-```yaml
-alerting:
-  alertmanagers:
-  - kubernetes_sd_configs:
-    - role: pod
-      tls_config:
-        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-    relabel_configs:
-    - source_labels: [__meta_kubernetes_pod_label_name]
-      regex: alertmanager
-      action: keep
-    - source_labels: [__meta_kubernetes_namespace]
-      regex: default
-      action: keep
-    - source_labels: [__meta_kubernetes_pod_container_port_number]
-      regex:
-      action: drop
-```
+### UTF-8 names
 
-## Recording rules and alerts
+Prometheus v3 supports UTF-8 in metric and label names. This means metric and
+label names can change after upgrading according to what is exposed by
+endpoints. Furthermore, metric and label names that would have previously been
+flagged as invalid no longer will be.
 
-The format for configuring alerting and recording rules has been changed to YAML.
-An example of a recording rule and alert in the old format:
+Users wishing to preserve the original validation behavior can update their
+Prometheus YAML configuration to specify the legacy validation scheme:
 
 ```
-job:request_duration_seconds:histogram_quantile99 =
-  histogram_quantile(0.99, sum by (le, job) (rate(request_duration_seconds_bucket[1m])))
-
-ALERT FrontendRequestLatency
-  IF job:request_duration_seconds:histogram_quantile99{job="frontend"} > 0.1
-  FOR 5m
-  ANNOTATIONS {
-    summary = "High frontend request latency",
-  }
+global:
+  metric_name_validation_scheme: legacy
 ```
 
-Would look like this:
+Or on a per-scrape basis:
 
-```yaml
-groups:
-- name: example.rules
-  rules:
-  - record: job:request_duration_seconds:histogram_quantile99
-    expr: histogram_quantile(0.99, sum by (le, job) (rate(request_duration_seconds_bucket[1m])))
-  - alert: FrontendRequestLatency
-    expr: job:request_duration_seconds:histogram_quantile99{job="frontend"} > 0.1
-    for: 5m
-    annotations:
-      summary: High frontend request latency
 ```
-
-To help with the change, the `promtool` tool has a mode to automate the rules conversion. Given a `.rules` file, it will output a `.rules.yml` file in the
-new format.
For example: - -``` -$ promtool update rules example.rules +scrape_configs: + - job_name: job1 + metric_name_validation_scheme: utf8 + - job_name: job2 + metric_name_validation_scheme: legacy ``` -You will need to use `promtool` from [Prometheus 2.5](https://github.com/prometheus/prometheus/releases/tag/v2.5.0) as later versions no longer contain the above subcommand. - -## Storage - -The data format in Prometheus 2.0 has completely changed and is not backwards -compatible with 1.8 and older versions. To retain access to your historic monitoring data we -recommend you run a non-scraping Prometheus instance running at least version -1.8.1 in parallel with your Prometheus 2.0 instance, and have the new server -read existing data from the old one via the remote read protocol. - -Your Prometheus 1.8 instance should be started with the following flags and an -config file containing only the `external_labels` setting (if any): +### Log message format +Prometheus v3 has adopted `log/slog` over the previous `go-kit/log`. This +results in a change of log message format. An example of the old log format is: ``` -$ ./prometheus-1.8.1.linux-amd64/prometheus -web.listen-address ":9094" -config.file old.yml +ts=2024-10-23T22:01:06.074Z caller=main.go:627 level=info msg="No time or size retention was set so using the default time retention" duration=15d +ts=2024-10-23T22:01:06.074Z caller=main.go:671 level=info msg="Starting Prometheus Server" mode=server version="(version=, branch=, revision=91d80252c3e528728b0f88d254dd720f6be07cb8-modified)" +ts=2024-10-23T22:01:06.074Z caller=main.go:676 level=info build_context="(go=go1.23.0, platform=linux/amd64, user=, date=, tags=unknown)" +ts=2024-10-23T22:01:06.074Z caller=main.go:677 level=info host_details="(Linux 5.15.0-124-generic #134-Ubuntu SMP Fri Sep 27 20:20:17 UTC 2024 x86_64 gigafips (none))" ``` -Prometheus 2.0 can then be started (on the same machine) with the following flags: +a similar sequence in the new log format looks like this: ``` -$ ./prometheus-2.0.0.linux-amd64/prometheus --config.file prometheus.yml -``` - -Where `prometheus.yml` contains in addition to your full existing configuration, the stanza: - -```yaml -remote_read: - - url: "http://localhost:9094/api/v1/read" +time=2024-10-24T00:03:07.542+02:00 level=INFO source=/home/user/go/src/github.com/prometheus/prometheus/cmd/prometheus/main.go:640 msg="No time or size retention was set so using the default time retention" duration=15d +time=2024-10-24T00:03:07.542+02:00 level=INFO source=/home/user/go/src/github.com/prometheus/prometheus/cmd/prometheus/main.go:681 msg="Starting Prometheus Server" mode=server version="(version=, branch=, revision=7c7116fea8343795cae6da42960cacd0207a2af8)" +time=2024-10-24T00:03:07.542+02:00 level=INFO source=/home/user/go/src/github.com/prometheus/prometheus/cmd/prometheus/main.go:686 msg="operational information" build_context="(go=go1.23.0, platform=linux/amd64, user=, date=, tags=unknown)" host_details="(Linux 5.15.0-124-generic #134-Ubuntu SMP Fri Sep 27 20:20:17 UTC 2024 x86_64 gigafips (none))" fd_limits="(soft=1048576, hard=1048576)" vm_limits="(soft=unlimited, hard=unlimited)" ``` -## PromQL - -The following features have been removed from PromQL: - -- `drop_common_labels` function - the `without` aggregation modifier should be used - instead. -- `keep_common` aggregation modifier - the `by` modifier should be used instead. -- `count_scalar` function - use cases are better handled by `absent()` or correct - propagation of labels in operations. 
- -See [issue #3060](https://github.com/prometheus/prometheus/issues/3060) for more -details. +### `le` and `quantile` label values +In Prometheus v3, the values of the `le` label of classic histograms and the +`quantile` label of summaries are normalized upon ingestion. In Prometheus v2 +the value of these labels depended on the scrape protocol (protobuf vs text +format) in some situations. This led to label values changing based on the +scrape protocol. E.g. a metric exposed as `my_classic_hist{le="1"}` would be +ingested as `my_classic_hist{le="1"}` via the text format, but as +`my_classic_hist{le="1.0"}` via protobuf. This changed the identity of the +metric and caused problems when querying the metric. +In Prometheus v3 these label values will always be normalized to a float like +representation. I.e. the above example will always result in +`my_classic_hist{le="1.0"}` being ingested into prometheus, no matter via which +protocol. The effect of this change is that alerts, recording rules and +dashboards that directly reference label values as whole numbers such as +`le="1"` will stop working. -## Miscellaneous - -### Prometheus non-root user +Ways to deal with this change either globally or on a per metric basis: -The Prometheus Docker image is now built to [run Prometheus -as a non-root user](https://github.com/prometheus/prometheus/pull/2859). If you -want the Prometheus UI/API to listen on a low port number (say, port 80), you'll -need to override it. For Kubernetes, you would use the following YAML: +- Fix references to integer `le`, `quantile` label values, but otherwise do +nothing and accept that some queries that span the transition time will produce +inaccurate or unexpected results. +_This is the recommended solution._ +- Use `metric_relabel_config` to retain the old labels when scraping targets. +This should **only** be applied to metrics that currently produce such labels. ```yaml -apiVersion: v1 -kind: Pod -metadata: - name: security-context-demo-2 -spec: - securityContext: - runAsUser: 0 -... -``` - -See [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) -for more details. - -If you're using Docker, then the following snippet would be used: + metric_relabel_configs: + - source_labels: + - quantile + target_label: quantile + regex: (\d+)\.0+ + - source_labels: + - le + - __name__ + target_label: le + regex: (\d+)\.0+;.*_bucket +``` + +### Disallow configuring Alertmanager with the v1 API +Prometheus 3 no longer supports Alertmanager's v1 API. Effectively Prometheus 3 +requires [Alertmanager 0.16.0](https://github.com/prometheus/alertmanager/releases/tag/v0.16.0) or later. Users with older Alertmanager +versions or configurations that use `alerting: alertmanagers: [api_version: v1]` +need to upgrade Alertmanager and change their configuration to use `api_version: v2`. -``` -docker run -p 9090:9090 prom/prometheus:latest -``` - -### Prometheus lifecycle +# Prometheus 2.0 migration guide -If you use the Prometheus `/-/reload` HTTP endpoint to [automatically reload your -Prometheus config when it changes](configuration/configuration.md), -these endpoints are disabled by default for security reasons in Prometheus 2.0. -To enable them, set the `--web.enable-lifecycle` flag. +For the Prometheus 1.8 to 2.0 please refer to the [Prometheus v2.55 documentation](https://prometheus.io/docs/prometheus/2.55/migration/). 
diff --git a/docs/querying/api.md b/docs/querying/api.md index efa244fbc8b..87de463288c 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -59,7 +59,7 @@ timestamps are always represented as Unix timestamps in seconds. * ``: Prometheus [time series selectors](basics.md#time-series-selectors) like `http_requests_total` or `http_requests_total{method=~"(GET|POST)"}` and need to be URL-encoded. -* ``: [Prometheus duration strings](basics.md#time-durations). +* ``: [the subset of Prometheus float literals using time units](basics.md#float-literals-and-time-durations). For example, `5m` refers to a duration of 5 minutes. * ``: boolean values (strings `true` and `false`). @@ -239,6 +239,75 @@ $ curl 'http://localhost:9090/api/v1/format_query?query=foo/bar' } ``` +## Parsing a PromQL expressions into a abstract syntax tree (AST) + +This endpoint is **experimental** and might change in the future. It is currently only meant to be used by Prometheus' own web UI, and the endpoint name and exact format returned may change from one Prometheus version to another. It may also be removed again in case it is no longer needed by the UI. + +The following endpoint parses a PromQL expression and returns it as a JSON-formatted AST (abstract syntax tree) representation: + +``` +GET /api/v1/parse_query +POST /api/v1/parse_query +``` + +URL query parameters: + +- `query=`: Prometheus expression query string. + +You can URL-encode these parameters directly in the request body by using the `POST` method and +`Content-Type: application/x-www-form-urlencoded` header. This is useful when specifying a large +query that may breach server-side URL character limits. + +The `data` section of the query result is a string containing the AST of the parsed query expression. + +The following example parses the expression `foo/bar`: + +```json +$ curl 'http://localhost:9090/api/v1/parse_query?query=foo/bar' +{ + "data" : { + "bool" : false, + "lhs" : { + "matchers" : [ + { + "name" : "__name__", + "type" : "=", + "value" : "foo" + } + ], + "name" : "foo", + "offset" : 0, + "startOrEnd" : null, + "timestamp" : null, + "type" : "vectorSelector" + }, + "matching" : { + "card" : "one-to-one", + "include" : [], + "labels" : [], + "on" : false + }, + "op" : "/", + "rhs" : { + "matchers" : [ + { + "name" : "__name__", + "type" : "=", + "value" : "bar" + } + ], + "name" : "bar", + "offset" : 0, + "startOrEnd" : null, + "timestamp" : null, + "type" : "vectorSelector" + }, + "type" : "binaryExpr" + }, + "status" : "success" +} +``` + ## Querying metadata Prometheus offers a set of API endpoints to query metadata about series and their labels. @@ -364,18 +433,40 @@ URL query parameters: series from which to read the label values. Optional. - `limit=`: Maximum number of returned series. Optional. 0 means disabled. - The `data` section of the JSON response is a list of string label values. -This example queries for all label values for the `job` label: +This example queries for all label values for the `http_status_code` label: ```json -$ curl http://localhost:9090/api/v1/label/job/values +$ curl http://localhost:9090/api/v1/label/http_status_code/values { "status" : "success", "data" : [ - "node", - "prometheus" + "200", + "504" + ] +} +``` + +Label names can optionally be encoded using the Values Escaping method, and is necessary if a name includes the `/` character. To encode a name in this way: + +* Prepend the label with `U__`. +* Letters, numbers, and colons appear as-is. 
+* Convert single underscores to double underscores. +* For all other characters, use the UTF-8 codepoint as a hex integer, surrounded + by underscores. So ` ` becomes `_20_` and a `.` becomes `_2e_`. + + More information about text escaping can be found in the original UTF-8 [Proposal document](https://github.com/prometheus/proposals/blob/main/proposals/2023-08-21-utf8.md#text-escaping). + +This example queries for all label values for the `http.status_code` label: + +```json +$ curl http://localhost:9090/api/v1/label/U__http_2e_status_code/values +{ + "status" : "success", + "data" : [ + "200", + "404" ] } ``` @@ -499,7 +590,7 @@ Instant vectors are returned as result type `vector`. The corresponding Each series could have the `"value"` key, or the `"histogram"` key, but not both. Series are not guaranteed to be returned in any particular order unless a function -such as [`sort`](functions.md#sort) or [`sort_by_label`](functions.md#sort_by_label)` +such as [`sort`](functions.md#sort) or [`sort_by_label`](functions.md#sort_by_label) is used. ### Scalars @@ -693,8 +784,10 @@ URL query parameters: - `rule_name[]=`: only return rules with the given rule name. If the parameter is repeated, rules with any of the provided names are returned. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done. - `rule_group[]=`: only return rules with the given rule group name. If the parameter is repeated, rules with any of the provided rule group names are returned. When the parameter is absent or empty, no filtering is done. - `file[]=`: only return rules with the given filepath. If the parameter is repeated, rules with any of the provided filepaths are returned. When the parameter is absent or empty, no filtering is done. -- `exclude_alerts=`: only return rules, do not return active alerts. +- `exclude_alerts=`: only return rules, do not return active alerts. - `match[]=`: only return rules that have configured labels that satisfy the label selectors. If the parameter is repeated, rules that match any of the sets of label selectors are returned. Note that matching is on the labels in the definition of each rule, not on the values after template expansion (for alerting rules). Optional. +- `group_limit=`: The `group_limit` parameter allows you to specify a limit for the number of rule groups that is returned in a single response. If the total number of rule groups exceeds the specified `group_limit` value, the response will include a `groupNextToken` property. You can use the value of this `groupNextToken` property in subsequent requests in the `group_next_token` parameter to paginate over the remaining rule groups. The `groupNextToken` property will not be present in the final response, indicating that you have retrieved all the available rule groups. Please note that there are no guarantees regarding the consistency of the response if the rule groups are being modified during the pagination process. +- `group_next_token`: the pagination token that was returned in previous request when the `group_limit` property is set. The pagination token is used to iteratively paginate over a large number of rule groups. To use the `group_next_token` parameter, the `group_limit` parameter also need to be present. If a rule group that coincides with the next token is removed while you are paginating over the rule groups, a response with status code 400 will be returned. 
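+
+For example, a client could page through rule groups two at a time by first
+requesting a limited response and then passing the returned token back on the
+next request (`<token>` is a placeholder for the `groupNextToken` value from the
+previous response):
+
+```
+GET /api/v1/rules?group_limit=2
+GET /api/v1/rules?group_limit=2&group_next_token=<token>
+```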
```json $ curl http://localhost:9090/api/v1/rules @@ -834,7 +927,7 @@ curl -G http://localhost:9091/api/v1/targets/metadata \ ``` The following example returns metadata for all metrics for all targets with -label `instance="127.0.0.1:9090`. +label `instance="127.0.0.1:9090"`. ```json curl -G http://localhost:9091/api/v1/targets/metadata \ @@ -1119,9 +1212,11 @@ The following endpoint returns various cardinality statistics about the Promethe GET /api/v1/status/tsdb ``` URL query parameters: + - `limit=`: Limit the number of returned items to a given number for each set of statistics. By default, 10 items are returned. -The `data` section of the query result consists of +The `data` section of the query result consists of: + - **headStats**: This provides the following data about the head block of the TSDB: - **numSeries**: The number of series. - **chunkCount**: The number of chunks. @@ -1197,13 +1292,13 @@ The following endpoint returns information about the WAL replay: GET /api/v1/status/walreplay ``` -**read**: The number of segments replayed so far. -**total**: The total number segments needed to be replayed. -**progress**: The progress of the replay (0 - 100%). -**state**: The state of the replay. Possible states: -- **waiting**: Waiting for the replay to start. -- **in progress**: The replay is in progress. -- **done**: The replay has finished. +- **read**: The number of segments replayed so far. +- **total**: The total number segments needed to be replayed. +- **progress**: The progress of the replay (0 - 100%). +- **state**: The state of the replay. Possible states: + - **waiting**: Waiting for the replay to start. + - **in progress**: The replay is in progress. + - **done**: The replay has finished. ```json $ curl http://localhost:9090/api/v1/status/walreplay @@ -1319,8 +1414,74 @@ is not considered an efficient way of ingesting samples. Use it with caution for specific low-volume use cases. It is not suitable for replacing the ingestion via scraping. -Enable the OTLP receiver by the feature flag -`--enable-feature=otlp-write-receiver`. When enabled, the OTLP receiver +Enable the OTLP receiver by setting +`--web.enable-otlp-receiver`. When enabled, the OTLP receiver endpoint is `/api/v1/otlp/v1/metrics`. *New in v2.47* + +## Notifications + +The following endpoints provide information about active status notifications concerning the Prometheus server itself. +Notifications are used in the web UI. + +These endpoints are **experimental**. They may change in the future. + +### Active Notifications + +The `/api/v1/notifications` endpoint returns a list of all currently active notifications. + +``` +GET /api/v1/notifications +``` + +Example: + +``` +$ curl http://localhost:9090/api/v1/notifications +{ + "status": "success", + "data": [ + { + "text": "Prometheus is shutting down and gracefully stopping all operations.", + "date": "2024-10-07T12:33:08.551376578+02:00", + "active": true + } + ] +} +``` + +*New in v3.0* + +### Live Notifications + +The `/api/v1/notifications/live` endpoint streams live notifications as they occur, using [Server-Sent Events](https://html.spec.whatwg.org/multipage/server-sent-events.html#server-sent-events). Deleted notifications are sent with `active: false`. Active notifications will be sent when connecting to the endpoint. 
+ +``` +GET /api/v1/notifications/live +``` + +Example: + +``` +$ curl http://localhost:9090/api/v1/notifications/live +data: { + "status": "success", + "data": [ + { + "text": "Prometheus is shutting down and gracefully stopping all operations.", + "date": "2024-10-07T12:33:08.551376578+02:00", + "active": true + } + ] +} +``` + +**Note:** The `/notifications/live` endpoint will return a `204 No Content` response if the maximum number of subscribers has been reached. You can set the maximum number of listeners with the flag `--web.max-notifications-subscribers`, which defaults to 16. + +``` +GET /api/v1/notifications/live +204 No Content +``` + +*New in v3.0* diff --git a/docs/querying/basics.md b/docs/querying/basics.md index 81ffb4e0f39..1c06afb85d4 100644 --- a/docs/querying/basics.md +++ b/docs/querying/basics.md @@ -35,8 +35,9 @@ evaluate to one of four types: Depending on the use-case (e.g. when graphing vs. displaying the output of an expression), only some of these types are legal as the result of a -user-specified expression. For example, an expression that returns an instant -vector is the only type which can be graphed. +user-specified expression. +For [instant queries](api.md#instant-queries), any of the above data types are allowed as the root of the expression. +[Range queries](api.md/#range-queries) only support scalar-typed and instant-vector-typed expressions. _Notes about the experimental native histograms:_ @@ -68,9 +69,10 @@ Example: 'these are unescaped: \n \\ \t' `these are not unescaped: \n ' " \t` -### Float literals +### Float literals and time durations -Scalar float values can be written as literal integer or floating-point numbers in the format (whitespace only included for better readability): +Scalar float values can be written as literal integer or floating-point numbers +in the format (whitespace only included for better readability): [-+]?( [0-9]*\.?[0-9]+([eE][-+]?[0-9]+)? @@ -87,16 +89,53 @@ Examples: 0x8f -Inf NaN - -As of version 2.54, float literals can also be represented using the syntax of time durations, where the time duration is converted into a float value corresponding to the number of seconds the time duration represents. This is an experimental feature and might still change. +Additionally, underscores (`_`) can be used in between decimal or hexadecimal +digits to improve readability. Examples: - 1s # Equivalent to 1.0 - 2m # Equivalent to 120.0 - 1ms # Equivalent to 0.001 - + 1_000_000 + .123_456_789 + 0x_53_AB_F3_82 + +Float literals are also used to specify durations in seconds. For convenience, +decimal integer numbers may be combined with the following +time units: + +* `ms` – milliseconds +* `s` – seconds – 1s equals 1000ms +* `m` – minutes – 1m equals 60s (ignoring leap seconds) +* `h` – hours – 1h equals 60m +* `d` – days – 1d equals 24h (ignoring so-called daylight saving time) +* `w` – weeks – 1w equals 7d +* `y` – years – 1y equals 365d (ignoring leap days) + +Suffixing a decimal integer number with one of the units above is a different +representation of the equivalent number of seconds as a bare float literal. + +Examples: + + 1s # Equivalent to 1. + 2m # Equivalent to 120. + 1ms # Equivalent to 0.001. + -2h # Equivalent to -7200. + +The following examples do _not_ work: + + 0xABm # No suffixing of hexadecimal numbers. + 1.5h # Time units cannot be combined with a floating point. + +Infd # No suffixing of ±Inf or NaN. + +Multiple units can be combined by concatenation of suffixed integers. 
Units +must be ordered from the longest to the shortest. A given unit must only appear +once per float literal. + +Examples: + + 1h30m # Equivalent to 5400s and thus 5400. + 12h34m56s # Equivalent to 45296s and thus 45296. + 54s321ms # Equivalent to 54.321. ## Time series selectors @@ -109,8 +148,16 @@ single sample value for each at a given timestamp (point in time). In the simpl form, only a metric name is specified, which results in an instant vector containing elements for all time series that have this metric name. +The value returned will be that of the most recent sample at or before the +query's evaluation timestamp (in the case of an +[instant query](api.md#instant-queries)) +or the current step within the query (in the case of a +[range query](api.md/#range-queries)). +The [`@` modifier](#modifier) allows overriding the timestamp relative to which +the selection takes place. Time series are only returned if their most recent sample is less than the [lookback period](#staleness) ago. + This example selects all time series that have the `http_requests_total` metric -name: +name, returning the most recent sample for each: http_requests_total @@ -200,53 +247,22 @@ syntax](https://github.com/google/re2/wiki/Syntax). ### Range Vector Selectors Range vector literals work like instant vector literals, except that they -select a range of samples back from the current instant. Syntactically, a [time -duration](#time-durations) is appended in square brackets (`[]`) at the end of -a vector selector to specify how far back in time values should be fetched for -each resulting range vector element. The range is a closed interval, -i.e. samples with timestamps coinciding with either boundary of the range are -still included in the selection. - -In this example, we select all the values we have recorded within the last 5 -minutes for all time series that have the metric name `http_requests_total` and -a `job` label set to `prometheus`: +select a range of samples back from the current instant. Syntactically, a +[float literal](#float-literals-and-time-durations) is appended in square +brackets (`[]`) at the end of a vector selector to specify for how many seconds +back in time values should be fetched for each resulting range vector element. +Commonly, the float literal uses the syntax with one or more time units, e.g. +`[5m]`. The range is a left-open and right-closed interval, i.e. samples with +timestamps coinciding with the left boundary of the range are excluded from the +selection, while samples coinciding with the right boundary of the range are +included in the selection. + +In this example, we select all the values recorded less than 5m ago for all +time series that have the metric name `http_requests_total` and a `job` label +set to `prometheus`: http_requests_total{job="prometheus"}[5m] -### Time Durations - -Time durations are specified as a number, followed immediately by one of the -following units: - -* `ms` - milliseconds -* `s` - seconds -* `m` - minutes -* `h` - hours -* `d` - days - assuming a day always has 24h -* `w` - weeks - assuming a week always has 7d -* `y` - years - assuming a year always has 365d1 - -1 For days in a year, the leap day is ignored, and conversely, for a minute, a leap second is ignored. - -Time durations can be combined by concatenation. Units must be ordered from the -longest to the shortest. A given unit must only appear once in a time duration. 
- -Here are some examples of valid time durations: - - 5h - 1h30m - 5m - 10s - - -As of version 2.54, time durations can also be represented using the syntax of float literals, implying the number of seconds of the time duration. This is an experimental feature and might still change. - -Examples: - - 1.0 # Equivalent to 1s - 0.001 # Equivalent to 1ms - 120 # Equivalent to 2m - ### Offset modifier The `offset` modifier allows changing the time offset for individual @@ -329,7 +345,7 @@ Note that the `@` modifier allows a query to look ahead of its evaluation time. Subquery allows you to run an instant query for a given range and resolution. The result of a subquery is a range vector. -Syntax: ` '[' ':' [] ']' [ @ ] [ offset ]` +Syntax: ` '[' ':' [] ']' [ @ ] [ offset ]` * `` is optional. Default is the global evaluation interval. @@ -358,8 +374,9 @@ independently of the actual present time series data. This is mainly to support cases like aggregation (`sum`, `avg`, and so on), where multiple aggregated time series do not precisely align in time. Because of their independence, Prometheus needs to assign a value at those timestamps for each relevant time -series. It does so by taking the newest sample before this timestamp within the lookback period. -The lookback period is 5 minutes by default. +series. It does so by taking the newest sample that is less than the lookback period ago. +The lookback period is 5 minutes by default, but can be +[set with the `--query.lookback-delta` flag](../command-line/prometheus.md) If a target scrape or rule evaluation no longer returns a sample for a time series that was previously present, this time series will be marked as stale. diff --git a/docs/querying/functions.md b/docs/querying/functions.md index c6e22019fc6..310b7b9337d 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -326,45 +326,70 @@ With native histograms, aggregating everything works as usual without any `by` c histogram_quantile(0.9, sum(rate(http_request_duration_seconds[10m]))) -The `histogram_quantile()` function interpolates quantile values by -assuming a linear distribution within a bucket. +In the (common) case that a quantile value does not coincide with a bucket +boundary, the `histogram_quantile()` function interpolates the quantile value +within the bucket the quantile value falls into. For classic histograms, for +native histograms with custom bucket boundaries, and for the zero bucket of +other native histograms, it assumes a uniform distribution of observations +within the bucket (also called _linear interpolation_). For the +non-zero-buckets of native histograms with a standard exponential bucketing +schema, the interpolation is done under the assumption that the samples within +the bucket are distributed in a way that they would uniformly populate the +buckets in a hypothetical histogram with higher resolution. (This is also +called _exponential interpolation_.) If `b` has 0 observations, `NaN` is returned. For φ < 0, `-Inf` is returned. For φ > 1, `+Inf` is returned. For φ = `NaN`, `NaN` is returned. -The following is only relevant for classic histograms: If `b` contains -fewer than two buckets, `NaN` is returned. The highest bucket must have an -upper bound of `+Inf`. (Otherwise, `NaN` is returned.) If a quantile is located -in the highest bucket, the upper bound of the second highest bucket is -returned. A lower limit of the lowest bucket is assumed to be 0 if the upper -bound of that bucket is greater than -0. 
In that case, the usual linear interpolation is applied within that -bucket. Otherwise, the upper bound of the lowest bucket is returned for -quantiles located in the lowest bucket. - -You can use `histogram_quantile(0, v instant-vector)` to get the estimated minimum value stored in -a histogram. - -You can use `histogram_quantile(1, v instant-vector)` to get the estimated maximum value stored in -a histogram. - -Buckets of classic histograms are cumulative. Therefore, the following should always be the case: - -* The counts in the buckets are monotonically increasing (strictly non-decreasing). -* A lack of observations between the upper limits of two consecutive buckets results in equal counts -in those two buckets. - -However, floating point precision issues (e.g. small discrepancies introduced by computing of buckets -with `sum(rate(...))`) or invalid data might violate these assumptions. In that case, -`histogram_quantile` would be unable to return meaningful results. To mitigate the issue, -`histogram_quantile` assumes that tiny relative differences between consecutive buckets are happening -because of floating point precision errors and ignores them. (The threshold to ignore a difference -between two buckets is a trillionth (1e-12) of the sum of both buckets.) Furthermore, if there are -non-monotonic bucket counts even after this adjustment, they are increased to the value of the -previous buckets to enforce monotonicity. The latter is evidence for an actual issue with the input -data and is therefore flagged with an informational annotation reading `input to histogram_quantile -needed to be fixed for monotonicity`. If you encounter this annotation, you should find and remove -the source of the invalid data. +Special cases for classic histograms: + +* If `b` contains fewer than two buckets, `NaN` is returned. +* The highest bucket must have an upper bound of `+Inf`. (Otherwise, `NaN` is + returned.) +* If a quantile is located in the highest bucket, the upper bound of the second + highest bucket is returned. +* The lower limit of the lowest bucket is assumed to be 0 if the upper bound of + that bucket is greater than 0. In that case, the usual linear interpolation + is applied within that bucket. Otherwise, the upper bound of the lowest + bucket is returned for quantiles located in the lowest bucket. + +Special cases for native histograms (relevant for the exact interpolation +happening within the zero bucket): + +* A zero bucket with finite width is assumed to contain no negative + observations if the histogram has observations in positive buckets, but none + in negative buckets. +* A zero bucket with finite width is assumed to contain no positive + observations if the histogram has observations in negative buckets, but none + in positive buckets. + +You can use `histogram_quantile(0, v instant-vector)` to get the estimated +minimum value stored in a histogram. + +You can use `histogram_quantile(1, v instant-vector)` to get the estimated +maximum value stored in a histogram. + +Buckets of classic histograms are cumulative. Therefore, the following should +always be the case: + +* The counts in the buckets are monotonically increasing (strictly + non-decreasing). +* A lack of observations between the upper limits of two consecutive buckets + results in equal counts in those two buckets. + +However, floating point precision issues (e.g. small discrepancies introduced +by computing of buckets with `sum(rate(...))`) or invalid data might violate +these assumptions. 
In that case, `histogram_quantile` would be unable to return +meaningful results. To mitigate the issue, `histogram_quantile` assumes that +tiny relative differences between consecutive buckets are happening because of +floating point precision errors and ignores them. (The threshold to ignore a +difference between two buckets is a trillionth (1e-12) of the sum of both +buckets.) Furthermore, if there are non-monotonic bucket counts even after this +adjustment, they are increased to the value of the previous buckets to enforce +monotonicity. The latter is evidence for an actual issue with the input data +and is therefore flagged with an informational annotation reading `input to +histogram_quantile needed to be fixed for monotonicity`. If you encounter this +annotation, you should find and remove the source of the invalid data. ## `histogram_stddev()` and `histogram_stdvar()` @@ -380,15 +405,22 @@ do not show up in the returned vector. Similarly, `histogram_stdvar(v instant-vector)` returns the estimated standard variance of observations in a native histogram. -## `holt_winters()` +## `double_exponential_smoothing()` -`holt_winters(v range-vector, sf scalar, tf scalar)` produces a smoothed value +**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.** + +`double_exponential_smoothing(v range-vector, sf scalar, tf scalar)` produces a smoothed value for time series based on the range in `v`. The lower the smoothing factor `sf`, the more importance is given to old data. The higher the trend factor `tf`, the more trends in the data is considered. Both `sf` and `tf` must be between 0 and 1. +For additional details, refer to [NIST Engineering Statistics Handbook](https://www.itl.nist.gov/div898/handbook/pmc/section4/pmc433.htm). +In Prometheus V2 this function was called `holt_winters`. This caused confusion +since the Holt-Winters method usually refers to triple exponential smoothing. +Double exponential smoothing as implemented here is also referred to as "Holt +Linear". -`holt_winters` should only be used with gauges. +`double_exponential_smoothing` should only be used with gauges. ## `hour()` diff --git a/docs/querying/remote_read_api.md b/docs/querying/remote_read_api.md index efbd08e9846..76de1123420 100644 --- a/docs/querying/remote_read_api.md +++ b/docs/querying/remote_read_api.md @@ -17,7 +17,8 @@ Request are made to the following endpoint. ### Samples -This returns a message that includes a list of raw samples. +This returns a message that includes a list of raw samples matching the +requested query. ### Streamed Chunks diff --git a/docs/stability.md b/docs/stability.md index 1fd2e51e0c6..cb30b8ad992 100644 --- a/docs/stability.md +++ b/docs/stability.md @@ -9,7 +9,7 @@ Prometheus promises API stability within a major version, and strives to avoid breaking changes for key features. Some features, which are cosmetic, still under development, or depend on 3rd party services, are not covered by this. -Things considered stable for 2.x: +Things considered stable for 3.x: * The query language and data model * Alerting and recording rules @@ -18,21 +18,25 @@ Things considered stable for 2.x: * Configuration file format (minus the service discovery remote read/write, see below) * Rule/alert file format * Console template syntax and semantics -* Remote write sending, per the [1.0 specification](https://prometheus.io/docs/concepts/remote_write_spec/). 
+* Remote write sending, per the [1.0 specification](https://prometheus.io/docs/concepts/remote_write_spec/) and receiving +* Agent mode +* OTLP receiver endpoint -Things considered unstable for 2.x: +Things considered unstable for 3.x: * Any feature listed as experimental or subject to change, including: - * The [`holt_winters` PromQL function](https://github.com/prometheus/prometheus/issues/2458) - * Remote write receiving, remote read and the remote read endpoint + * The [`double_exponential_smoothing` PromQL function](https://github.com/prometheus/prometheus/issues/2458) + * Remote read and the remote read endpoint * Server-side HTTPS and basic authentication -* Service discovery integrations, with the exception of `static_configs` and `file_sd_configs` +* Service discovery integrations, with the exception of `static_configs`, `file_sd_configs` and `http_sd_config` * Go APIs of packages that are part of the server * HTML generated by the web UI * The metrics in the /metrics endpoint of Prometheus itself * Exact on-disk format. Potential changes however, will be forward compatible and transparently handled by Prometheus * The format of the logs +Prometheus 2.x stability guarantees can be found [in the 2.x documentation](https://prometheus.io/docs/prometheus/2.55/stability/). + As long as you are not using any features marked as experimental/unstable, an upgrade within a major version can usually be performed without any operational adjustments and very little risk that anything will break. Any breaking changes diff --git a/docs/storage.md b/docs/storage.md index 8f5f42b8c6a..2142c970ffb 100644 --- a/docs/storage.md +++ b/docs/storage.md @@ -87,10 +87,9 @@ or 31 days, whichever is smaller. Prometheus has several flags that configure local storage. The most important are: - `--storage.tsdb.path`: Where Prometheus writes its database. Defaults to `data/`. -- `--storage.tsdb.retention.time`: How long to retain samples in storage. When this flag is - set, it overrides `storage.tsdb.retention`. If neither this flag nor `storage.tsdb.retention` - nor `storage.tsdb.retention.size` is set, the retention time defaults to `15d`. - Supported units: y, w, d, h, m, s, ms. +- `--storage.tsdb.retention.time`: How long to retain samples in storage. If neither + this flag nor `storage.tsdb.retention.size` is set, the retention time defaults to + `15d`. Supported units: y, w, d, h, m, s, ms. - `--storage.tsdb.retention.size`: The maximum number of bytes of storage blocks to retain. The oldest data will be removed first. Defaults to `0` or disabled. Units supported: B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Only @@ -98,7 +97,6 @@ Prometheus has several flags that configure local storage. The most important ar chunks are counted in the total size. So the minimum requirement for the disk is the peak space taken by the `wal` (the WAL and Checkpoint) and `chunks_head` (m-mapped Head chunks) directory combined (peaks every 2 hours). -- `--storage.tsdb.retention`: Deprecated in favor of `storage.tsdb.retention.time`. - `--storage.tsdb.wal-compression`: Enables compression of the write-ahead log (WAL). Depending on your data, you can expect the WAL size to be halved with little extra cpu load. This flag was introduced in 2.11.0 and enabled by default in 2.20.0. @@ -146,7 +144,7 @@ a buffer, ensuring that older entries will be removed before the allocated stora for Prometheus becomes full. 
At present, we recommend setting the retention size to, at most, 80-85% of your -allocated Prometheus disk space. This increases the likelihood that older entires +allocated Prometheus disk space. This increases the likelihood that older entries will be removed prior to hitting any disk limitations. ## Remote storage integrations diff --git a/documentation/examples/custom-sd/adapter-usage/main.go b/documentation/examples/custom-sd/adapter-usage/main.go index 8ccbafe6f1c..128132a8d2d 100644 --- a/documentation/examples/custom-sd/adapter-usage/main.go +++ b/documentation/examples/custom-sd/adapter-usage/main.go @@ -18,6 +18,7 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "net" "net/http" "os" @@ -26,10 +27,9 @@ import ( "time" "github.com/alecthomas/kingpin/v2" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" prom_discovery "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -41,7 +41,7 @@ var ( a = kingpin.New("sd adapter usage", "Tool to generate file_sd target files for unimplemented SD mechanisms.") outputFile = a.Flag("output.file", "Output file for file_sd compatible file.").Default("custom_sd.json").String() listenAddress = a.Flag("listen.address", "The address the Consul HTTP API is listening on for requests.").Default("localhost:8500").String() - logger log.Logger + logger *slog.Logger // addressLabel is the name for the label containing a target's address. addressLabel = model.MetaLabelPrefix + "consul_address" @@ -90,7 +90,7 @@ type discovery struct { address string refreshInterval int tagSeparator string - logger log.Logger + logger *slog.Logger oldSourceList map[string]bool } @@ -164,7 +164,7 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { var srvs map[string][]string resp, err := http.Get(fmt.Sprintf("http://%s/v1/catalog/services", d.address)) if err != nil { - level.Error(d.logger).Log("msg", "Error getting services list", "err", err) + d.logger.Error("Error getting services list", "err", err) time.Sleep(time.Duration(d.refreshInterval) * time.Second) continue } @@ -173,7 +173,7 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { io.Copy(io.Discard, resp.Body) resp.Body.Close() if err != nil { - level.Error(d.logger).Log("msg", "Error reading services list", "err", err) + d.logger.Error("Error reading services list", "err", err) time.Sleep(time.Duration(d.refreshInterval) * time.Second) continue } @@ -181,7 +181,7 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { err = json.Unmarshal(b, &srvs) resp.Body.Close() if err != nil { - level.Error(d.logger).Log("msg", "Error parsing services list", "err", err) + d.logger.Error("Error parsing services list", "err", err) time.Sleep(time.Duration(d.refreshInterval) * time.Second) continue } @@ -200,13 +200,13 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } resp, err := http.Get(fmt.Sprintf("http://%s/v1/catalog/service/%s", d.address, name)) if err != nil { - level.Error(d.logger).Log("msg", "Error getting services nodes", "service", name, "err", err) + d.logger.Error("Error getting services nodes", "service", name, "err", err) break } tg, err := d.parseServiceNodes(resp, name) if err != nil { - level.Error(d.logger).Log("msg", "Error parsing services nodes", "service", name, "err", err) + d.logger.Error("Error 
parsing services nodes", "service", name, "err", err) break } tgs = append(tgs, tg) @@ -254,8 +254,7 @@ func main() { fmt.Println("err: ", err) return } - logger = log.NewSyncLogger(log.NewLogfmtLogger(os.Stdout)) - logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) + logger = promslog.New(&promslog.Config{}) ctx := context.Background() @@ -272,7 +271,7 @@ func main() { } if err != nil { - level.Error(logger).Log("msg", "failed to create discovery metrics", "err", err) + logger.Error("failed to create discovery metrics", "err", err) os.Exit(1) } @@ -280,7 +279,7 @@ func main() { refreshMetrics := prom_discovery.NewRefreshMetrics(reg) metrics, err := prom_discovery.RegisterSDMetrics(reg, refreshMetrics) if err != nil { - level.Error(logger).Log("msg", "failed to register service discovery metrics", "err", err) + logger.Error("failed to register service discovery metrics", "err", err) os.Exit(1) } diff --git a/documentation/examples/custom-sd/adapter/adapter.go b/documentation/examples/custom-sd/adapter/adapter.go index dcf5a2b78c6..b242c4eaa0d 100644 --- a/documentation/examples/custom-sd/adapter/adapter.go +++ b/documentation/examples/custom-sd/adapter/adapter.go @@ -18,13 +18,12 @@ import ( "context" "encoding/json" "fmt" + "log/slog" "os" "path/filepath" "reflect" "sort" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" @@ -55,7 +54,7 @@ type Adapter struct { manager *discovery.Manager output string name string - logger log.Logger + logger *slog.Logger } func mapToArray(m map[string]*customSD) []customSD { @@ -106,7 +105,7 @@ func (a *Adapter) refreshTargetGroups(allTargetGroups map[string][]*targetgroup. a.groups = tempGroups err := a.writeOutput() if err != nil { - level.Error(log.With(a.logger, "component", "sd-adapter")).Log("err", err) + a.logger.With("component", "sd-adapter").Error("failed to write output", "err", err) } } } @@ -163,7 +162,7 @@ func (a *Adapter) Run() { } // NewAdapter creates a new instance of Adapter. -func NewAdapter(ctx context.Context, file, name string, d discovery.Discoverer, logger log.Logger, sdMetrics map[string]discovery.DiscovererMetrics, registerer prometheus.Registerer) *Adapter { +func NewAdapter(ctx context.Context, file, name string, d discovery.Discoverer, logger *slog.Logger, sdMetrics map[string]discovery.DiscovererMetrics, registerer prometheus.Registerer) *Adapter { return &Adapter{ ctx: ctx, disc: d, diff --git a/documentation/examples/prometheus-otlp.yml b/documentation/examples/prometheus-otlp.yml new file mode 100644 index 00000000000..f0a8ab8b118 --- /dev/null +++ b/documentation/examples/prometheus-otlp.yml @@ -0,0 +1,31 @@ +# my global config +global: + evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. + +otlp: + # Recommended attributes to be promoted to labels. + promote_resource_attributes: + - service.instance.id + - service.name + - service.namespace + - cloud.availability_zone + - cloud.region + - container.name + - deployment.environment.name + - k8s.cluster.name + - k8s.container.name + - k8s.cronjob.name + - k8s.daemonset.name + - k8s.deployment.name + - k8s.job.name + - k8s.namespace.name + - k8s.pod.name + - k8s.replicaset.name + - k8s.statefulset.name + # Ingest OTLP data keeping UTF-8 characters in metric/label names. + translation_strategy: NoUTF8EscapingWithSuffixes + +storage: + # OTLP is a push-based protocol, Out of order samples is a common scenario. 
+ tsdb: + out_of_order_time_window: 30m diff --git a/documentation/examples/prometheus-ovhcloud.yml b/documentation/examples/prometheus-ovhcloud.yml index 21facad1caf..b2cc60af25e 100644 --- a/documentation/examples/prometheus-ovhcloud.yml +++ b/documentation/examples/prometheus-ovhcloud.yml @@ -1,4 +1,4 @@ -# An example scrape configuration for running Prometheus with Ovhcloud. +# An example scrape configuration for running Prometheus with OVHcloud. scrape_configs: - job_name: 'ovhcloud' ovhcloud_sd_configs: diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 8ed5084d913..0aad437588a 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -4,12 +4,11 @@ go 1.22.0 require ( github.com/alecthomas/kingpin/v2 v2.4.0 - github.com/go-kit/log v0.2.1 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 github.com/influxdata/influxdb v1.11.6 - github.com/prometheus/client_golang v1.20.2 - github.com/prometheus/common v0.57.0 + github.com/prometheus/client_golang v1.20.4 + github.com/prometheus/common v0.60.0 github.com/prometheus/prometheus v0.53.1 github.com/stretchr/testify v1.9.0 ) @@ -26,6 +25,7 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -55,11 +55,11 @@ require ( go.opentelemetry.io/otel/trace v1.27.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.25.0 // indirect - golang.org/x/net v0.27.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/sys v0.22.0 // indirect - golang.org/x/text v0.16.0 // indirect + golang.org/x/crypto v0.27.0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect google.golang.org/grpc v1.65.0 // indirect diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 1abeff7eb16..936b448d848 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -253,8 +253,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg= -github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -264,8 +264,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.57.0 h1:Ro/rKjwdq9mZn1K5QPctzh+MA4Lp0BuYk5ZZEVhoNcY= -github.com/prometheus/common v0.57.0/go.mod h1:7uRPFSUTbfZWsJ7MHY56sqt7hLQu3bxXHDnNhl8E9qI= +github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= +github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -323,8 +323,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -344,20 +344,20 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -373,17 +373,17 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go index 36242a8f4da..b02560dbabf 100644 --- 
a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go @@ -16,19 +16,19 @@ package graphite import ( "bytes" "fmt" + "log/slog" "math" "net" "sort" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" ) // Client allows sending batches of Prometheus samples to Graphite. type Client struct { - logger log.Logger + logger *slog.Logger address string transport string @@ -37,9 +37,9 @@ type Client struct { } // NewClient creates a new Client. -func NewClient(logger log.Logger, address, transport string, timeout time.Duration, prefix string) *Client { +func NewClient(logger *slog.Logger, address, transport string, timeout time.Duration, prefix string) *Client { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } return &Client{ logger: logger, @@ -93,7 +93,7 @@ func (c *Client) Write(samples model.Samples) error { t := float64(s.Timestamp.UnixNano()) / 1e9 v := float64(s.Value) if math.IsNaN(v) || math.IsInf(v, 0) { - level.Debug(c.logger).Log("msg", "Cannot send value to Graphite, skipping sample", "value", v, "sample", s) + c.logger.Debug("Cannot send value to Graphite, skipping sample", "value", v, "sample", s) continue } fmt.Fprintf(&buf, "%s %f %f\n", k, v, t) diff --git a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go index e84ed9e129d..6ae40f81739 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go @@ -17,22 +17,22 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "math" "os" "strings" - "github.com/go-kit/log" - "github.com/go-kit/log/level" influx "github.com/influxdata/influxdb/client/v2" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/prompb" ) // Client allows sending batches of Prometheus samples to InfluxDB. type Client struct { - logger log.Logger + logger *slog.Logger client influx.Client database string @@ -41,16 +41,16 @@ type Client struct { } // NewClient creates a new Client. -func NewClient(logger log.Logger, conf influx.HTTPConfig, db, rp string) *Client { +func NewClient(logger *slog.Logger, conf influx.HTTPConfig, db, rp string) *Client { c, err := influx.NewHTTPClient(conf) // Currently influx.NewClient() *should* never return an error. 
if err != nil { - level.Error(logger).Log("err", err) + logger.Error("Error creating influx HTTP client", "err", err) os.Exit(1) } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } return &Client{ @@ -84,7 +84,7 @@ func (c *Client) Write(samples model.Samples) error { for _, s := range samples { v := float64(s.Value) if math.IsNaN(v) || math.IsInf(v, 0) { - level.Debug(c.logger).Log("msg", "Cannot send to InfluxDB, skipping sample", "value", v, "sample", s) + c.logger.Debug("Cannot send to InfluxDB, skipping sample", "value", v, "sample", s) c.ignoredSamples.Inc() continue } diff --git a/documentation/examples/remote_storage/remote_storage_adapter/main.go b/documentation/examples/remote_storage/remote_storage_adapter/main.go index bb348aba7f3..7f62990d2ed 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/main.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/main.go @@ -17,6 +17,7 @@ package main import ( "fmt" "io" + "log/slog" "net/http" _ "net/http/pprof" "net/url" @@ -26,16 +27,14 @@ import ( "time" "github.com/alecthomas/kingpin/v2" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" influx "github.com/influxdata/influxdb/client/v2" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/model" - "github.com/prometheus/common/promlog" - "github.com/prometheus/common/promlog/flag" + "github.com/prometheus/common/promslog" + "github.com/prometheus/common/promslog/flag" "github.com/prometheus/prometheus/documentation/examples/remote_storage/remote_storage_adapter/graphite" "github.com/prometheus/prometheus/documentation/examples/remote_storage/remote_storage_adapter/influxdb" @@ -57,7 +56,7 @@ type config struct { remoteTimeout time.Duration listenAddr string telemetryPath string - promlogConfig promlog.Config + promslogConfig promslog.Config } var ( @@ -105,11 +104,11 @@ func main() { cfg := parseFlags() http.Handle(cfg.telemetryPath, promhttp.Handler()) - logger := promlog.New(&cfg.promlogConfig) + logger := promslog.New(&cfg.promslogConfig) writers, readers := buildClients(logger, cfg) if err := serve(logger, cfg.listenAddr, writers, readers); err != nil { - level.Error(logger).Log("msg", "Failed to listen", "addr", cfg.listenAddr, "err", err) + logger.Error("Failed to listen", "addr", cfg.listenAddr, "err", err) os.Exit(1) } } @@ -120,7 +119,7 @@ func parseFlags() *config { cfg := &config{ influxdbPassword: os.Getenv("INFLUXDB_PW"), - promlogConfig: promlog.Config{}, + promslogConfig: promslog.Config{}, } a.Flag("graphite-address", "The host:port of the Graphite server to send samples to. None, if empty."). @@ -146,7 +145,7 @@ func parseFlags() *config { a.Flag("web.telemetry-path", "Address to listen on for web endpoints."). 
Default("/metrics").StringVar(&cfg.telemetryPath) - flag.AddFlags(a, &cfg.promlogConfig) + flag.AddFlags(a, &cfg.promslogConfig) _, err := a.Parse(os.Args[1:]) if err != nil { @@ -168,19 +167,19 @@ type reader interface { Name() string } -func buildClients(logger log.Logger, cfg *config) ([]writer, []reader) { +func buildClients(logger *slog.Logger, cfg *config) ([]writer, []reader) { var writers []writer var readers []reader if cfg.graphiteAddress != "" { c := graphite.NewClient( - log.With(logger, "storage", "Graphite"), + logger.With("storage", "Graphite"), cfg.graphiteAddress, cfg.graphiteTransport, cfg.remoteTimeout, cfg.graphitePrefix) writers = append(writers, c) } if cfg.opentsdbURL != "" { c := opentsdb.NewClient( - log.With(logger, "storage", "OpenTSDB"), + logger.With("storage", "OpenTSDB"), cfg.opentsdbURL, cfg.remoteTimeout, ) @@ -189,7 +188,7 @@ func buildClients(logger log.Logger, cfg *config) ([]writer, []reader) { if cfg.influxdbURL != "" { url, err := url.Parse(cfg.influxdbURL) if err != nil { - level.Error(logger).Log("msg", "Failed to parse InfluxDB URL", "url", cfg.influxdbURL, "err", err) + logger.Error("Failed to parse InfluxDB URL", "url", cfg.influxdbURL, "err", err) os.Exit(1) } conf := influx.HTTPConfig{ @@ -199,7 +198,7 @@ func buildClients(logger log.Logger, cfg *config) ([]writer, []reader) { Timeout: cfg.remoteTimeout, } c := influxdb.NewClient( - log.With(logger, "storage", "InfluxDB"), + logger.With("storage", "InfluxDB"), conf, cfg.influxdbDatabase, cfg.influxdbRetentionPolicy, @@ -208,15 +207,15 @@ func buildClients(logger log.Logger, cfg *config) ([]writer, []reader) { writers = append(writers, c) readers = append(readers, c) } - level.Info(logger).Log("msg", "Starting up...") + logger.Info("Starting up...") return writers, readers } -func serve(logger log.Logger, addr string, writers []writer, readers []reader) error { +func serve(logger *slog.Logger, addr string, writers []writer, readers []reader) error { http.HandleFunc("/write", func(w http.ResponseWriter, r *http.Request) { req, err := remote.DecodeWriteRequest(r.Body) if err != nil { - level.Error(logger).Log("msg", "Read error", "err", err.Error()) + logger.Error("Read error", "err", err.Error()) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -238,21 +237,21 @@ func serve(logger log.Logger, addr string, writers []writer, readers []reader) e http.HandleFunc("/read", func(w http.ResponseWriter, r *http.Request) { compressed, err := io.ReadAll(r.Body) if err != nil { - level.Error(logger).Log("msg", "Read error", "err", err.Error()) + logger.Error("Read error", "err", err.Error()) http.Error(w, err.Error(), http.StatusInternalServerError) return } reqBuf, err := snappy.Decode(nil, compressed) if err != nil { - level.Error(logger).Log("msg", "Decode error", "err", err.Error()) + logger.Error("Decode error", "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } var req prompb.ReadRequest if err := proto.Unmarshal(reqBuf, &req); err != nil { - level.Error(logger).Log("msg", "Unmarshal error", "err", err.Error()) + logger.Error("Unmarshal error", "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -267,7 +266,7 @@ func serve(logger log.Logger, addr string, writers []writer, readers []reader) e var resp *prompb.ReadResponse resp, err = reader.Read(&req) if err != nil { - level.Warn(logger).Log("msg", "Error executing query", "query", req, "storage", reader.Name(), "err", err) + logger.Warn("Error executing query", "query", 
req, "storage", reader.Name(), "err", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -283,7 +282,7 @@ func serve(logger log.Logger, addr string, writers []writer, readers []reader) e compressed = snappy.Encode(nil, data) if _, err := w.Write(compressed); err != nil { - level.Warn(logger).Log("msg", "Error writing response", "storage", reader.Name(), "err", err) + logger.Warn("Error writing response", "storage", reader.Name(), "err", err) } }) @@ -309,12 +308,12 @@ func protoToSamples(req *prompb.WriteRequest) model.Samples { return samples } -func sendSamples(logger log.Logger, w writer, samples model.Samples) { +func sendSamples(logger *slog.Logger, w writer, samples model.Samples) { begin := time.Now() err := w.Write(samples) duration := time.Since(begin).Seconds() if err != nil { - level.Warn(logger).Log("msg", "Error sending samples to remote storage", "err", err, "storage", w.Name(), "num_samples", len(samples)) + logger.Warn("Error sending samples to remote storage", "err", err, "storage", w.Name(), "num_samples", len(samples)) failedSamples.WithLabelValues(w.Name()).Add(float64(len(samples))) } sentSamples.WithLabelValues(w.Name()).Add(float64(len(samples))) diff --git a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go index abb1d0b7d39..433c70527a3 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go @@ -19,13 +19,12 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "math" "net/http" "net/url" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" ) @@ -36,14 +35,14 @@ const ( // Client allows sending batches of Prometheus samples to OpenTSDB. type Client struct { - logger log.Logger + logger *slog.Logger url string timeout time.Duration } // NewClient creates a new Client. 
-func NewClient(logger log.Logger, url string, timeout time.Duration) *Client { +func NewClient(logger *slog.Logger, url string, timeout time.Duration) *Client { return &Client{ logger: logger, url: url, @@ -78,7 +77,7 @@ func (c *Client) Write(samples model.Samples) error { for _, s := range samples { v := float64(s.Value) if math.IsNaN(v) || math.IsInf(v, 0) { - level.Debug(c.logger).Log("msg", "Cannot send value to OpenTSDB, skipping sample", "value", v, "sample", s) + c.logger.Debug("Cannot send value to OpenTSDB, skipping sample", "value", v, "sample", s) continue } metric := TagValue(s.Metric[model.MetricNameLabel]) diff --git a/go.mod b/go.mod index 0631611234b..596d1449bc9 100644 --- a/go.mod +++ b/go.mod @@ -17,23 +17,21 @@ require ( github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 github.com/cespare/xxhash/v2 v2.3.0 github.com/dennwc/varint v1.0.0 - github.com/digitalocean/godo v1.122.0 - github.com/docker/docker v27.2.0+incompatible + github.com/digitalocean/godo v1.126.0 + github.com/docker/docker v27.3.1+incompatible github.com/edsrzf/mmap-go v1.1.0 github.com/envoyproxy/go-control-plane v0.13.0 github.com/envoyproxy/protoc-gen-validate v1.1.0 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb github.com/fsnotify/fsnotify v1.7.0 - github.com/go-kit/log v0.2.1 - github.com/go-logfmt/logfmt v0.6.0 github.com/go-openapi/strfmt v0.23.0 - github.com/go-zookeeper/zk v1.0.3 + github.com/go-zookeeper/zk v1.0.4 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 github.com/google/go-cmp v0.6.0 github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da github.com/google/uuid v1.6.0 - github.com/gophercloud/gophercloud v1.14.0 + github.com/gophercloud/gophercloud v1.14.1 github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/consul/api v1.29.4 @@ -41,9 +39,9 @@ require ( github.com/hetznercloud/hcloud-go/v2 v2.13.1 github.com/ionos-cloud/sdk-go/v6 v6.2.1 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.17.9 + github.com/klauspost/compress v1.17.10 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b - github.com/linode/linodego v1.40.0 + github.com/linode/linodego v1.41.0 github.com/miekg/dns v1.1.62 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f @@ -52,52 +50,52 @@ require ( github.com/oklog/ulid v1.3.1 github.com/ovh/go-ovh v1.6.0 github.com/prometheus/alertmanager v0.27.0 - github.com/prometheus/client_golang v1.20.3 + github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.59.1 + github.com/prometheus/common v0.60.1 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/sigv4 v0.1.0 - github.com/prometheus/exporter-toolkit v0.12.0 + github.com/prometheus/exporter-toolkit v0.13.0 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.9.0 github.com/vultr/govultr/v2 v2.17.2 - go.opentelemetry.io/collector/pdata v1.14.1 - go.opentelemetry.io/collector/semconv v0.108.1 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 - go.opentelemetry.io/otel v1.29.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0 - 
go.opentelemetry.io/otel/sdk v1.29.0
-	go.opentelemetry.io/otel/trace v1.29.0
+	go.opentelemetry.io/collector/pdata v1.16.0
+	go.opentelemetry.io/collector/semconv v0.110.0
+	go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0
+	go.opentelemetry.io/otel v1.31.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0
+	go.opentelemetry.io/otel/sdk v1.30.0
+	go.opentelemetry.io/otel/trace v1.31.0
 	go.uber.org/atomic v1.11.0
-	go.uber.org/automaxprocs v1.5.3
+	go.uber.org/automaxprocs v1.6.0
 	go.uber.org/goleak v1.3.0
 	go.uber.org/multierr v1.11.0
 	golang.org/x/oauth2 v0.23.0
 	golang.org/x/sync v0.8.0
-	golang.org/x/sys v0.25.0
-	golang.org/x/text v0.18.0
-	golang.org/x/time v0.6.0
-	golang.org/x/tools v0.24.0
-	google.golang.org/api v0.195.0
-	google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed
-	google.golang.org/grpc v1.66.0
+	golang.org/x/sys v0.26.0
+	golang.org/x/text v0.19.0
+	golang.org/x/tools v0.26.0
+	google.golang.org/api v0.199.0
+	google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1
+	google.golang.org/grpc v1.67.1
 	google.golang.org/protobuf v1.34.2
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.1
-	k8s.io/api v0.31.0
-	k8s.io/apimachinery v0.31.0
-	k8s.io/client-go v0.31.0
+	k8s.io/api v0.31.1
+	k8s.io/apimachinery v0.31.1
+	k8s.io/client-go v0.31.1
 	k8s.io/klog v1.0.0
 	k8s.io/klog/v2 v2.130.1
 )

 require (
-	cloud.google.com/go/auth v0.9.3 // indirect
+	cloud.google.com/go/auth v0.9.5 // indirect
 	cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
-	cloud.google.com/go/compute/metadata v0.5.0 // indirect
+	cloud.google.com/go/compute/metadata v0.5.2 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
 	github.com/Microsoft/go-winio v0.6.1 // indirect
@@ -106,7 +104,7 @@ require (
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
 	github.com/cilium/ebpf v0.11.0 // indirect
-	github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b // indirect
+	github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 // indirect
 	github.com/containerd/cgroups/v3 v3.0.3 // indirect
 	github.com/containerd/log v0.1.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
@@ -119,7 +117,6 @@ require (
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
 	github.com/ghodss/yaml v1.0.0 // indirect
-	github.com/go-kit/kit v0.12.0 // indirect
 	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-openapi/analysis v0.22.2 // indirect
@@ -133,7 +130,7 @@ require (
 	github.com/go-resty/resty/v2 v2.13.1 // indirect
 	github.com/godbus/dbus/v5 v5.0.4 // indirect
 	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
-	github.com/golang/glog v1.2.1 // indirect
+	github.com/golang/glog v1.2.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/gnostic-models v0.6.8 // indirect
@@ -188,13 +185,14 @@ require (
 	github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
 	go.mongodb.org/mongo-driver v1.14.0 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/otel/metric v1.29.0 // indirect
+	go.opentelemetry.io/otel/metric v1.31.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
-	golang.org/x/crypto v0.26.0 // indirect
+	golang.org/x/crypto v0.28.0 // indirect
 	golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
-	golang.org/x/mod v0.20.0 // indirect
-	golang.org/x/net v0.28.0 // indirect
-	golang.org/x/term v0.23.0 // indirect
+	golang.org/x/mod v0.21.0 // indirect
+	golang.org/x/net v0.30.0 // indirect
+	golang.org/x/term v0.25.0 // indirect
+	golang.org/x/time v0.6.0 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
@@ -207,11 +205,6 @@ require (
 	sigs.k8s.io/yaml v1.4.0 // indirect
 )

-replace (
-	k8s.io/klog => github.com/simonpasquier/klog-gokit v0.3.0
-	k8s.io/klog/v2 => github.com/simonpasquier/klog-gokit/v3 v3.5.0
-)
-
 // Exclude linodego v1.0.0 as it is no longer published on github.
 exclude github.com/linode/linodego v1.0.0
diff --git a/go.sum b/go.sum
index 0246a377d26..7c5dcfba82f 100644
--- a/go.sum
+++ b/go.sum
@@ -12,8 +12,8 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP
 cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
 cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
 cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go/auth v0.9.3 h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U=
-cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk=
+cloud.google.com/go/auth v0.9.5 h1:4CTn43Eynw40aFVr3GpPqsQponx2jv0BQpjvajsbbzw=
+cloud.google.com/go/auth v0.9.5/go.mod h1:Xo0n7n66eHyOWWCnitop6870Ilwo3PiZyodVkkH1xWM=
 cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY=
 cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
@@ -22,8 +22,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
 cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
 cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
-cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
+cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo=
+cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -61,13 +61,8 @@ github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1v
 github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
 github.com/KimMachineGun/automemlimit v0.6.1 h1:ILa9j1onAAMadBsyyUJv5cack8Y1WT26yLj/V+ulKp8=
 github.com/KimMachineGun/automemlimit v0.6.1/go.mod h1:T7xYht7B8r6AG/AqFcUdc7fzd2bIdBKmepfP2S1svPY=
-github.com/Knetic/govaluate
v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -78,23 +73,17 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg= github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -102,8 +91,6 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce 
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -117,24 +104,16 @@ github.com/cilium/ebpf v0.11.0 h1:V8gS/bTCCjX9uUnkUFUpPsksM8n1lXBAvHcpiFk1X2Y= github.com/cilium/ebpf v0.11.0/go.mod h1:WE7CZAnqOL2RouJ4f1uyNhqr2P4CCvXFIqdRDUgWsVs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 h1:N+3sFI5GUjRKBi+i0TxYVST9h4Ie192jJWpHvthBBgg= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -142,29 +121,22 @@ github.com/davecgh/go-spew 
v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/digitalocean/godo v1.122.0 h1:ziytLQi8QKtDp2K1A+YrYl2dWLHLh2uaMzWvcz9HkKg= -github.com/digitalocean/godo v1.122.0/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY= +github.com/digitalocean/godo v1.126.0 h1:+Znh7VMQj/E8ArbjWnc7OKGjWfzC+I8OCSRp7r1MdD8= +github.com/digitalocean/godo v1.126.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v27.2.0+incompatible h1:Rk9nIVdfH3+Vz4cyI/uhbINhEZ/oLmc+CBXmH6fbNk4= -github.com/docker/docker v27.2.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= +github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -182,11 +154,8 @@ github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod 
h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA= github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= @@ -198,17 +167,11 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= -github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= -github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= -github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= -github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -234,26 +197,21 @@ github.com/go-openapi/validate v0.23.0 h1:2l7PJLzCis4YUGEoW6eoQw3WhyM65WSIcjX6SQ github.com/go-openapi/validate v0.23.0/go.mod h1:EeiAZ5bmpSIOJV1WLfyYF9qp/B1ZgSaEpHTJHtN5cbE= github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g= github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= -github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I= +github.com/go-zookeeper/zk v1.0.4/go.mod 
h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -282,7 +240,6 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -324,7 +281,6 @@ github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -334,30 +290,20 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.13.0 
h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= -github.com/gophercloud/gophercloud v1.14.0 h1:Bt9zQDhPrbd4qX7EILGmy+i7GP35cc+AAL2+wIJpUE8= -github.com/gophercloud/gophercloud v1.14.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gophercloud/gophercloud v1.14.1 h1:DTCNaTVGl8/cFu58O1JwWgis9gtISAFONqpMKNg/Vpw= +github.com/gophercloud/gophercloud v1.14.1/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE= github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg= github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0= github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= @@ -366,7 +312,6 @@ github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 
h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= @@ -384,7 +329,6 @@ github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9 github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= @@ -395,51 +339,38 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1avVOnu2cfms8VAiD8lUq3vWI8mTocOXN/w= github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= github.com/hetznercloud/hcloud-go/v2 v2.13.1 h1:jq0GP4QaYE5d8xR/Zw17s9qoaESRJMXfGmtD1a/qckQ= github.com/hetznercloud/hcloud-go/v2 v2.13.1/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOFFddpfBm/6R1Zz0IiF0= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle 
v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/ionos-cloud/sdk-go/v6 v6.2.1 h1:mxxN+frNVmbFrmmFfXnBC3g2USYJrl6mc1LW2iNYbFY= github.com/ionos-cloud/sdk-go/v6 v6.2.1/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI= github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -447,15 +378,13 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod 
h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0= +github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -470,11 +399,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v1.40.0 h1:7ESY0PwK94hoggoCtIroT1Xk6b1flrFBNZ6KwqbTqlI= -github.com/linode/linodego v1.40.0/go.mod h1:NsUw4l8QrLdIofRg1NYFBbW5ZERnmbZykVBszPZLORM= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/linode/linodego v1.41.0 h1:GcP7JIBr9iLRJ9FwAtb9/WCT1DuPJS/xUApapfdjtiY= +github.com/linode/linodego v1.41.0/go.mod h1:Ow4/XZ0yvWBzt3iAHwchvhSx30AyLintsSMvvQ2/SJY= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -485,7 +411,6 @@ github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= @@ -493,7 +418,6 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g= github.com/maxatome/go-testdeep v1.12.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwUBUAfUNvrclaNM= @@ -501,23 +425,16 @@ github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= 
github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= @@ -538,64 +455,35 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1 h1:dOYG7LS/WK00RWZc8XGgcUTlTxpp3mKhdR2Q9z9HbXM= github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0 
h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/ovh/go-ovh v1.6.0 h1:ixLOwxQdzYDx296sXcgS35TOPEahJkpjMGtzPadCjQI= github.com/ovh/go-ovh v1.6.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod 
h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -608,54 +496,43 @@ github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P github.com/prometheus/alertmanager v0.27.0 h1:V6nTa2J5V4s8TG4C4HtrBP/WNSebCCTYGGv4qecA/+I= github.com/prometheus/alertmanager v0.27.0/go.mod h1:8Ia/R3urPmbzJ8OsdvmZvIprDwvwmYCmUbwBL+jlPOE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.20.3 h1:oPksm4K8B+Vt35tUhw6GbSNSgVlVSBH0qELP/7u83l4= -github.com/prometheus/client_golang v1.20.3/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common 
v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0= -github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= +github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= +github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= -github.com/prometheus/exporter-toolkit v0.12.0 h1:DkE5RcEZR3lQA2QD5JLVQIf41dFKNsVMXFhgqcif7fo= -github.com/prometheus/exporter-toolkit v0.12.0/go.mod h1:fQH0KtTn0yrrS0S82kqppRjDDiwMfIQUwT+RBRRhwUc= +github.com/prometheus/exporter-toolkit v0.13.0 h1:lmA0Q+8IaXgmFRKw09RldZmZdnvu9wwcDLIXGmTPw1c= +github.com/prometheus/exporter-toolkit v0.13.0/go.mod h1:2uop99EZl80KdXhv/MxVI2181fMcwlsumFOqBecGkG0= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 
h1:yoKAVkEVwAqbGbR8n87rHQ1dulL25rKloGadb3vm770= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30/go.mod h1:sH0u6fq6x4R5M7WxkoQFY/o7UaiItec0o1LinLCJNq8= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= @@ -664,28 +541,14 @@ github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= github.com/shoenig/test v1.7.1/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c h1:aqg5Vm5dwtvL+YgDpBcK1ITf3o96N/K7/wsRXQnUTEs= github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c/go.mod h1:owqhoLW1qZoYLZzLnBw+QkPP9WZnjlSWihhxAJC1+/M= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/simonpasquier/klog-gokit v0.3.0 h1:TkFK21cbwDRS+CiystjqbAiq5ubJcVTk9hLUck5Ntcs= -github.com/simonpasquier/klog-gokit v0.3.0/go.mod h1:+SUlDQNrhVtGt2FieaqNftzzk8P72zpWlACateWxA9k= -github.com/simonpasquier/klog-gokit/v3 v3.5.0 h1:ewnk+ickph0hkQFgdI4pffKIbruAxxWcg0Fe/vQmLOM= -github.com/simonpasquier/klog-gokit/v3 v3.5.0/go.mod h1:S9flvRzzpaYLYtXI2w8jf9R/IU/Cy14NrbvDUevNP1E= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -703,28 +566,20 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= 
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -732,49 +587,42 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/pdata v1.14.1 h1:wXZjtQA7Vy5HFqco+yA95ENyMQU5heBB1IxMHQf6mUk= -go.opentelemetry.io/collector/pdata v1.14.1/go.mod h1:z1dTjwwtcoXxZx2/nkHysjxMeaxe9pEmYTEr4SMNIx8= -go.opentelemetry.io/collector/semconv v0.108.1 h1:Txk9tauUnamZaxS5vlf1O0uZ4VD6nioRBR0nX8L/fU4= -go.opentelemetry.io/collector/semconv v0.108.1/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= -go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 h1:dIIDULZJpgdiHz5tXrTgKIMLkus6jEFa7x5SOKcyR7E= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0/go.mod h1:jlRVBe7+Z1wyxFSUs48L6OBQZ5JwH2Hg/Vbl+t9rAgI= 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 h1:nSiV3s7wiCam610XcLbYOmMfJxB9gO4uK3Xgv5gmTgg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0/go.mod h1:hKn/e/Nmd19/x1gvIHwtOwVWM+VhuITSWip3JUDghj0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0 h1:JAv0Jwtl01UFiyWZEMiJZBiTlv5A50zNs8lsthXqIio= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0/go.mod h1:QNKLmUEAq2QUbPQUfvw4fmv0bgbK7UlOSFCnXyfvSNc= -go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= -go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= -go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= -go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= -go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= -go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= +go.opentelemetry.io/collector/pdata v1.16.0 h1:g02K8jlRnmQ7TQDuXpdgVL6vIxIVqr5Gbb1qIR27rto= +go.opentelemetry.io/collector/pdata v1.16.0/go.mod h1:YZZJIt2ehxosYf/Y1pbvexjNWsIGNNrzzlCTO9jC1F4= +go.opentelemetry.io/collector/semconv v0.110.0 h1:KHQnOHe3gUz0zsxe8ph9kN5OTypCFD4V+06AiBTfeNk= +go.opentelemetry.io/collector/semconv v0.110.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 h1:4BZHA+B1wXEQoGNHxW8mURaLhcdGwvRnmhGbm+odRbc= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0/go.mod h1:3qi2EEwMgB4xnKgPLqsDP3j9qxnHDZeHsnAxfjQqTko= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 h1:m0yTiGDLUvVYaTFbAvCkVYIYcvwKt3G7OLoN77NUs/8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0/go.mod h1:wBQbT4UekBfegL2nx0Xk1vBcnzyBPsIVm9hRG4fYcr4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 h1:umZgi92IyxfXd/l4kaDhnKgY8rnN/cZcF1LKc6I8OQ8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0/go.mod h1:4lVs6obhSVRb1EW5FhOuBTyiQhtRtAnnva9vD3yRfq8= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= +go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod 
h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= -go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -782,8 +630,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -818,17 +666,12 @@ golang.org/x/mod v0.2.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -839,7 +682,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -865,8 +707,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 
v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -893,11 +735,7 @@ golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -908,13 +746,11 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -955,16 +791,16 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= 
+golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= +golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -976,24 +812,20 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -1005,8 +837,6 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1014,7 +844,6 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1035,13 +864,12 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod 
h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1056,10 +884,9 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.195.0 h1:Ude4N8FvTKnnQJHU48RFI40jOBgIrL8Zqr3/QeST6yU= -google.golang.org/api v0.195.0/go.mod h1:DOGRWuv3P8TU8Lnz7uQc4hyNqrBpMtD9ppW3wBJurgc= +google.golang.org/api v0.199.0 h1:aWUXClp+VFJmqE0JPvpZOK3LDQMyFKYIow4etYd9qxs= +google.golang.org/api v0.199.0/go.mod h1:ohG4qSztDJmZdjK/Ar6MhbAmb/Rpi4JHOqagsh90K28= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -1070,7 +897,6 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -1094,19 +920,14 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed h1:3RgNmBoI9MZhsj3QxC+AP/qQhNwpCLOvYDYYsFrhFt0= -google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -1116,8 +937,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= -google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1137,20 +958,13 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod 
h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1166,7 +980,6 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1174,12 +987,16 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= -k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= -k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= -k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= -k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= +k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= +k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= +k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= +k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= +k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= @@ -1191,7 +1008,5 @@ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMm sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod 
h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index 2a37ea66d45..a6ad47acd3b 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -230,6 +230,17 @@ func (h *FloatHistogram) TestExpression() string { res = append(res, fmt.Sprintf("custom_values:%g", m.CustomValues)) } + switch m.CounterResetHint { + case UnknownCounterReset: + // Unknown is the default, don't add anything. + case CounterReset: + res = append(res, "counter_reset_hint:reset") + case NotCounterReset: + res = append(res, "counter_reset_hint:not_reset") + case GaugeType: + res = append(res, "counter_reset_hint:gauge") + } + addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string { if len(spans) > 1 { panic(fmt.Sprintf("histogram with multiple %s spans not supported", kind)) @@ -293,6 +304,14 @@ func (h *FloatHistogram) Div(scalar float64) *FloatHistogram { h.ZeroCount /= scalar h.Count /= scalar h.Sum /= scalar + // Division by zero removes all buckets. + if scalar == 0 { + h.PositiveBuckets = nil + h.NegativeBuckets = nil + h.PositiveSpans = nil + h.NegativeSpans = nil + return h + } for i := range h.PositiveBuckets { h.PositiveBuckets[i] /= scalar } @@ -342,7 +361,7 @@ func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) { default: // All other cases shouldn't actually happen. // They are a direct collision of CounterReset and NotCounterReset. - // Conservatively set the CounterResetHint to "unknown" and isse a warning. + // Conservatively set the CounterResetHint to "unknown" and issue a warning. h.CounterResetHint = UnknownCounterReset // TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place } @@ -658,7 +677,7 @@ func detectReset(currIt, prevIt *floatBucketIterator) bool { if !currIt.Next() { // Reached end of currIt early, therefore // previous histogram has a bucket that the - // current one does not have. Unlass all + // current one does not have. Unless all // remaining buckets in the previous histogram // are unpopulated, this is a reset. for { @@ -891,7 +910,7 @@ func (h *FloatHistogram) trimBucketsInZeroBucket() { // reconcileZeroBuckets finds a zero bucket large enough to include the zero // buckets of both histograms (the receiving histogram and the other histogram) // with a zero threshold that is not within a populated bucket in either -// histogram. This method modifies the receiving histogram accourdingly, but +// histogram. This method modifies the receiving histogram accordingly, but // leaves the other histogram as is. Instead, it returns the zero count the // other histogram would have if it were modified. 
func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 { diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go index cf370a313e4..34988e9d39d 100644 --- a/model/histogram/float_histogram_test.go +++ b/model/histogram/float_histogram_test.go @@ -399,14 +399,10 @@ func TestFloatHistogramDiv(t *testing.T) { }, 0, &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: math.Inf(1), - Count: math.Inf(1), - Sum: math.Inf(1), - PositiveSpans: []Span{{-2, 1}, {2, 3}}, - PositiveBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1), math.Inf(1)}, - NegativeSpans: []Span{{3, 2}, {3, 2}}, - NegativeBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1), math.Inf(1)}, + ZeroThreshold: 0.01, + Count: math.Inf(1), + Sum: math.Inf(1), + ZeroCount: math.Inf(1), }, }, { diff --git a/model/labels/labels_common.go b/model/labels/labels_common.go index d7bdc1e0768..99529a38367 100644 --- a/model/labels/labels_common.go +++ b/model/labels/labels_common.go @@ -230,5 +230,5 @@ func contains(s []Label, n string) bool { } func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) + return unsafe.String(unsafe.SliceData(b), len(b)) } diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go index c8bce51234a..c64bb990e02 100644 --- a/model/labels/labels_stringlabels.go +++ b/model/labels/labels_stringlabels.go @@ -16,7 +16,6 @@ package labels import ( - "reflect" "slices" "strings" "unsafe" @@ -299,10 +298,8 @@ func Equal(ls, o Labels) bool { func EmptyLabels() Labels { return Labels{} } -func yoloBytes(s string) (b []byte) { - *(*string)(unsafe.Pointer(&b)) = s - (*reflect.SliceHeader)(unsafe.Pointer(&b)).Cap = len(s) - return +func yoloBytes(s string) []byte { + return unsafe.Slice(unsafe.StringData(s), len(s)) } // New returns a sorted Labels from the given labels. @@ -338,8 +335,8 @@ func Compare(a, b Labels) int { } i := 0 // First, go 8 bytes at a time. Data strings are expected to be 8-byte aligned. - sp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&shorter)).Data) - lp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&longer)).Data) + sp := unsafe.Pointer(unsafe.StringData(shorter)) + lp := unsafe.Pointer(unsafe.StringData(longer)) for ; i < len(shorter)-8; i += 8 { if *(*uint64)(unsafe.Add(sp, i)) != *(*uint64)(unsafe.Add(lp, i)) { break diff --git a/model/labels/regexp.go b/model/labels/regexp.go index d2151d83ddb..3df94351948 100644 --- a/model/labels/regexp.go +++ b/model/labels/regexp.go @@ -63,13 +63,13 @@ func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) { // available, even if the string matcher is faster. m.matchString = m.stringMatcher.Matches } else { - parsed, err := syntax.Parse(v, syntax.Perl) + parsed, err := syntax.Parse(v, syntax.Perl|syntax.DotNL) if err != nil { return nil, err } // Simplify the syntax tree to run faster. 
parsed = parsed.Simplify() - m.re, err = regexp.Compile("^(?:" + parsed.String() + ")$") + m.re, err = regexp.Compile("^(?s:" + parsed.String() + ")$") if err != nil { return nil, err } diff --git a/model/labels/regexp_test.go b/model/labels/regexp_test.go index 24875e64ef3..8df0dbb0232 100644 --- a/model/labels/regexp_test.go +++ b/model/labels/regexp_test.go @@ -121,7 +121,7 @@ func TestFastRegexMatcher_MatchString(t *testing.T) { t.Parallel() m, err := NewFastRegexMatcher(r) require.NoError(t, err) - re := regexp.MustCompile("^(?:" + r + ")$") + re := regexp.MustCompile("^(?s:" + r + ")$") require.Equal(t, re.MatchString(v), m.MatchString(v)) }) } @@ -167,7 +167,7 @@ func TestOptimizeConcatRegex(t *testing.T) { } for _, c := range cases { - parsed, err := syntax.Parse(c.regex, syntax.Perl) + parsed, err := syntax.Parse(c.regex, syntax.Perl|syntax.DotNL) require.NoError(t, err) prefix, suffix, contains := optimizeConcatRegex(parsed) @@ -248,7 +248,7 @@ func TestFindSetMatches(t *testing.T) { c := c t.Run(c.pattern, func(t *testing.T) { t.Parallel() - parsed, err := syntax.Parse(c.pattern, syntax.Perl) + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) require.NoError(t, err) matches, actualCaseSensitive := findSetMatches(parsed) require.Equal(t, c.expMatches, matches) @@ -348,15 +348,15 @@ func TestStringMatcherFromRegexp(t *testing.T) { pattern string exp StringMatcher }{ - {".*", anyStringWithoutNewlineMatcher{}}, - {".*?", anyStringWithoutNewlineMatcher{}}, + {".*", trueMatcher{}}, + {".*?", trueMatcher{}}, {"(?s:.*)", trueMatcher{}}, - {"(.*)", anyStringWithoutNewlineMatcher{}}, - {"^.*$", anyStringWithoutNewlineMatcher{}}, - {".+", &anyNonEmptyStringMatcher{matchNL: false}}, + {"(.*)", trueMatcher{}}, + {"^.*$", trueMatcher{}}, + {".+", &anyNonEmptyStringMatcher{matchNL: true}}, {"(?s:.+)", &anyNonEmptyStringMatcher{matchNL: true}}, - {"^.+$", &anyNonEmptyStringMatcher{matchNL: false}}, - {"(.+)", &anyNonEmptyStringMatcher{matchNL: false}}, + {"^.+$", &anyNonEmptyStringMatcher{matchNL: true}}, + {"(.+)", &anyNonEmptyStringMatcher{matchNL: true}}, {"", emptyStringMatcher{}}, {"^$", emptyStringMatcher{}}, {"^foo$", &equalStringMatcher{s: "foo", caseSensitive: true}}, @@ -366,23 +366,23 @@ func TestStringMatcherFromRegexp(t *testing.T) { {`(?i:((foo1|foo2|bar)))`, orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO1", caseSensitive: false}, &equalStringMatcher{s: "FOO2", caseSensitive: false}}), &equalStringMatcher{s: "BAR", caseSensitive: false}})}, {"^((?i:foo|oo)|(bar))$", orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO", caseSensitive: false}, &equalStringMatcher{s: "OO", caseSensitive: false}, &equalStringMatcher{s: "bar", caseSensitive: true}})}, {"(?i:(foo1|foo2|bar))", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO1", caseSensitive: false}, &equalStringMatcher{s: "FOO2", caseSensitive: false}}), &equalStringMatcher{s: "BAR", caseSensitive: false}})}, - {".*foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}}, - {"(.*)foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}}, - {"(.*)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}}, - {"(.+)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, 
left: &anyNonEmptyStringMatcher{matchNL: false}, right: anyStringWithoutNewlineMatcher{}}}, - {"^.+foo.+", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: false}, right: &anyNonEmptyStringMatcher{matchNL: false}}}, - {"^(.*)(foo)(.*)$", &containsStringMatcher{substrings: []string{"foo"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}}, - {"^(.*)(foo|foobar)(.*)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}}, - {"^(.*)(foo|foobar)(.+)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: anyStringWithoutNewlineMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: false}}}, - {"^(.*)(bar|b|buzz)(.+)$", &containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: anyStringWithoutNewlineMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: false}}}, + {".*foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"(.*)foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"(.*)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"(.+)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: true}, right: trueMatcher{}}}, + {"^.+foo.+", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: true}, right: &anyNonEmptyStringMatcher{matchNL: true}}}, + {"^(.*)(foo)(.*)$", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"^(.*)(foo|foobar)(.*)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"^(.*)(foo|foobar)(.+)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: trueMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: true}}}, + {"^(.*)(bar|b|buzz)(.+)$", &containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: trueMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: true}}}, {"10\\.0\\.(1|2)\\.+", nil}, - {"10\\.0\\.(1|2).+", &containsStringMatcher{substrings: []string{"10.0.1", "10.0.2"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: false}}}, - {"^.+foo", &literalSuffixStringMatcher{left: &anyNonEmptyStringMatcher{}, suffix: "foo", suffixCaseSensitive: true}}, - {"foo-.*$", &literalPrefixSensitiveStringMatcher{prefix: "foo-", right: anyStringWithoutNewlineMatcher{}}}, - {"(prometheus|api_prom)_api_v1_.+", &containsStringMatcher{substrings: []string{"prometheus_api_v1_", "api_prom_api_v1_"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: false}}}, - {"^((.*)(bar|b|buzz)(.+)|foo)$", orStringMatcher([]StringMatcher{&containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: anyStringWithoutNewlineMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: false}}, &equalStringMatcher{s: "foo", caseSensitive: true}})}, - {"((fo(bar))|.+foo)", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "fobar", caseSensitive: true}}), &literalSuffixStringMatcher{suffix: "foo", suffixCaseSensitive: true, left: &anyNonEmptyStringMatcher{matchNL: false}}})}, - {"(.+)/(gateway|cortex-gw|cortex-gw-internal)", &containsStringMatcher{substrings: []string{"/gateway", "/cortex-gw", "/cortex-gw-internal"}, left: &anyNonEmptyStringMatcher{matchNL: false}, right: nil}}, + 
{"10\\.0\\.(1|2).+", &containsStringMatcher{substrings: []string{"10.0.1", "10.0.2"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: true}}}, + {"^.+foo", &literalSuffixStringMatcher{left: &anyNonEmptyStringMatcher{matchNL: true}, suffix: "foo", suffixCaseSensitive: true}}, + {"foo-.*$", &literalPrefixSensitiveStringMatcher{prefix: "foo-", right: trueMatcher{}}}, + {"(prometheus|api_prom)_api_v1_.+", &containsStringMatcher{substrings: []string{"prometheus_api_v1_", "api_prom_api_v1_"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: true}}}, + {"^((.*)(bar|b|buzz)(.+)|foo)$", orStringMatcher([]StringMatcher{&containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: trueMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: true}}, &equalStringMatcher{s: "foo", caseSensitive: true}})}, + {"((fo(bar))|.+foo)", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "fobar", caseSensitive: true}}), &literalSuffixStringMatcher{suffix: "foo", suffixCaseSensitive: true, left: &anyNonEmptyStringMatcher{matchNL: true}}})}, + {"(.+)/(gateway|cortex-gw|cortex-gw-internal)", &containsStringMatcher{substrings: []string{"/gateway", "/cortex-gw", "/cortex-gw-internal"}, left: &anyNonEmptyStringMatcher{matchNL: true}, right: nil}}, // we don't support case insensitive matching for contains. // This is because there's no strings.IndexOfFold function. // We can revisit later if this is really popular by using strings.ToUpper. @@ -393,15 +393,15 @@ func TestStringMatcherFromRegexp(t *testing.T) { {".*foo.*bar.*", nil}, {`\d*`, nil}, {".", nil}, - {"/|/bar.*", &literalPrefixSensitiveStringMatcher{prefix: "/", right: orStringMatcher{emptyStringMatcher{}, &literalPrefixSensitiveStringMatcher{prefix: "bar", right: anyStringWithoutNewlineMatcher{}}}}}, + {"/|/bar.*", &literalPrefixSensitiveStringMatcher{prefix: "/", right: orStringMatcher{emptyStringMatcher{}, &literalPrefixSensitiveStringMatcher{prefix: "bar", right: trueMatcher{}}}}}, // This one is not supported because `stringMatcherFromRegexp` is not reentrant for syntax.OpConcat. // It would make the code too complex to handle it. {"(.+)/(foo.*|bar$)", nil}, // Case sensitive alternate with same literal prefix and .* suffix. - {"(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixSensitiveStringMatcher{prefix: "xyz-016a-ixb-", right: orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "dp", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixSensitiveStringMatcher{prefix: "op", right: anyStringWithoutNewlineMatcher{}}}}}, + {"(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixSensitiveStringMatcher{prefix: "xyz-016a-ixb-", right: orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "dp", right: trueMatcher{}}, &literalPrefixSensitiveStringMatcher{prefix: "op", right: trueMatcher{}}}}}, // Case insensitive alternate with same literal prefix and .* suffix. 
- {"(?i:(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*))", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: anyStringWithoutNewlineMatcher{}}}}}, - {"(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: anyStringWithoutNewlineMatcher{}}}}}, + {"(?i:(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*))", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: trueMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: trueMatcher{}}}}}, + {"(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: trueMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: trueMatcher{}}}}}, // Concatenated variable length selectors are not supported. {"foo.*.*", nil}, {"foo.+.+", nil}, @@ -410,15 +410,15 @@ func TestStringMatcherFromRegexp(t *testing.T) { {"aaa.?.?", nil}, {"aaa.?.*", nil}, // Regexps with ".?". - {"ext.?|xfs", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: false}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}}, + {"ext.?|xfs", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}}, {"(?s)(ext.?|xfs)", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}}, - {"foo.?", &literalPrefixSensitiveStringMatcher{prefix: "foo", right: &zeroOrOneCharacterStringMatcher{matchNL: false}}}, + {"foo.?", &literalPrefixSensitiveStringMatcher{prefix: "foo", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}}, {"f.?o", nil}, } { c := c t.Run(c.pattern, func(t *testing.T) { t.Parallel() - parsed, err := syntax.Parse(c.pattern, syntax.Perl) + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) require.NoError(t, err) matches := stringMatcherFromRegexp(parsed) require.Equal(t, c.exp, matches) @@ -437,16 +437,16 @@ func TestStringMatcherFromRegexp_LiteralPrefix(t *testing.T) { { pattern: "(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", expectedLiteralPrefixMatchers: 3, - expectedMatches: []string{"xyz-016a-ixb-dp", "xyz-016a-ixb-dpXXX", "xyz-016a-ixb-op", "xyz-016a-ixb-opXXX"}, - expectedNotMatches: []string{"XYZ-016a-ixb-dp", "xyz-016a-ixb-d", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp", "xyz-016a-ixb-dp\n"}, + expectedMatches: []string{"xyz-016a-ixb-dp", "xyz-016a-ixb-dpXXX", "xyz-016a-ixb-op", "xyz-016a-ixb-opXXX", "xyz-016a-ixb-dp\n"}, + expectedNotMatches: []string{"XYZ-016a-ixb-dp", "xyz-016a-ixb-d", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp"}, }, // Case insensitive. 
{ pattern: "(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", expectedLiteralPrefixMatchers: 3, - expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dpXXX", "xyz-016a-ixb-op", "XYZ-016a-ixb-opXXX"}, - expectedNotMatches: []string{"xyz-016a-ixb-d", "xyz", "dp", "xyz-016a-ixb-dp\n"}, + expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dpXXX", "xyz-016a-ixb-op", "XYZ-016a-ixb-opXXX", "xyz-016a-ixb-dp\n"}, + expectedNotMatches: []string{"xyz-016a-ixb-d", "xyz", "dp"}, }, // Nested literal prefixes, case sensitive. @@ -474,13 +474,13 @@ func TestStringMatcherFromRegexp_LiteralPrefix(t *testing.T) { }, } { t.Run(c.pattern, func(t *testing.T) { - parsed, err := syntax.Parse(c.pattern, syntax.Perl) + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) require.NoError(t, err) matcher := stringMatcherFromRegexp(parsed) require.NotNil(t, matcher) - re := regexp.MustCompile("^" + c.pattern + "$") + re := regexp.MustCompile("^(?s:" + c.pattern + ")$") // Pre-condition check: ensure it contains literalPrefixSensitiveStringMatcher or literalPrefixInsensitiveStringMatcher. numPrefixMatchers := 0 @@ -523,16 +523,16 @@ func TestStringMatcherFromRegexp_LiteralSuffix(t *testing.T) { { pattern: "(.*xyz-016a-ixb-dp|.*xyz-016a-ixb-op)", expectedLiteralSuffixMatchers: 2, - expectedMatches: []string{"xyz-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "xyz-016a-ixb-op", "XXXxyz-016a-ixb-op"}, - expectedNotMatches: []string{"XYZ-016a-ixb-dp", "yz-016a-ixb-dp", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp", "\nxyz-016a-ixb-dp"}, + expectedMatches: []string{"xyz-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "xyz-016a-ixb-op", "XXXxyz-016a-ixb-op", "\nxyz-016a-ixb-dp"}, + expectedNotMatches: []string{"XYZ-016a-ixb-dp", "yz-016a-ixb-dp", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp"}, }, // Case insensitive. { pattern: "(?i)(.*xyz-016a-ixb-dp|.*xyz-016a-ixb-op)", expectedLiteralSuffixMatchers: 2, - expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "XyZ-016a-ixb-op", "XXXxyz-016a-ixb-op"}, - expectedNotMatches: []string{"yz-016a-ixb-dp", "xyz-016a-ixb-o", "xyz", "dp", "\nxyz-016a-ixb-dp"}, + expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "XyZ-016a-ixb-op", "XXXxyz-016a-ixb-op", "\nxyz-016a-ixb-dp"}, + expectedNotMatches: []string{"yz-016a-ixb-dp", "xyz-016a-ixb-o", "xyz", "dp"}, }, // Nested literal suffixes, case sensitive. @@ -552,13 +552,13 @@ func TestStringMatcherFromRegexp_LiteralSuffix(t *testing.T) { }, } { t.Run(c.pattern, func(t *testing.T) { - parsed, err := syntax.Parse(c.pattern, syntax.Perl) + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) require.NoError(t, err) matcher := stringMatcherFromRegexp(parsed) require.NotNil(t, matcher) - re := regexp.MustCompile("^" + c.pattern + "$") + re := regexp.MustCompile("^(?s:" + c.pattern + ")$") // Pre-condition check: ensure it contains literalSuffixStringMatcher. 
numSuffixMatchers := 0 @@ -598,26 +598,26 @@ func TestStringMatcherFromRegexp_Quest(t *testing.T) { { pattern: "test.?", expectedZeroOrOneMatchers: 1, - expectedMatches: []string{"test", "test!"}, - expectedNotMatches: []string{"test\n", "tes", "test!!"}, + expectedMatches: []string{"test\n", "test", "test!"}, + expectedNotMatches: []string{"tes", "test!!"}, }, { pattern: ".?test", expectedZeroOrOneMatchers: 1, - expectedMatches: []string{"test", "!test"}, - expectedNotMatches: []string{"\ntest", "tes", "test!"}, + expectedMatches: []string{"\ntest", "test", "!test"}, + expectedNotMatches: []string{"tes", "test!"}, }, { pattern: "(aaa.?|bbb.?)", expectedZeroOrOneMatchers: 2, - expectedMatches: []string{"aaa", "aaaX", "bbb", "bbbX"}, - expectedNotMatches: []string{"aa", "aaaXX", "aaa\n", "bb", "bbbXX", "bbb\n"}, + expectedMatches: []string{"aaa", "aaaX", "bbb", "bbbX", "aaa\n", "bbb\n"}, + expectedNotMatches: []string{"aa", "aaaXX", "bb", "bbbXX"}, }, { pattern: ".*aaa.?", expectedZeroOrOneMatchers: 1, - expectedMatches: []string{"aaa", "Xaaa", "aaaX", "XXXaaa", "XXXaaaX"}, - expectedNotMatches: []string{"aa", "aaaXX", "XXXaaaXXX", "XXXaaa\n"}, + expectedMatches: []string{"aaa", "Xaaa", "aaaX", "XXXaaa", "XXXaaaX", "XXXaaa\n"}, + expectedNotMatches: []string{"aa", "aaaXX", "XXXaaaXXX"}, }, // Match newline. @@ -632,18 +632,18 @@ func TestStringMatcherFromRegexp_Quest(t *testing.T) { { pattern: "(aaa.?|((?s).?bbb.+))", expectedZeroOrOneMatchers: 2, - expectedMatches: []string{"aaa", "aaaX", "bbbX", "XbbbX", "bbbXXX", "\nbbbX"}, - expectedNotMatches: []string{"aa", "aaa\n", "Xbbb", "\nbbb"}, + expectedMatches: []string{"aaa", "aaaX", "bbbX", "XbbbX", "bbbXXX", "\nbbbX", "aaa\n"}, + expectedNotMatches: []string{"aa", "Xbbb", "\nbbb"}, }, } { t.Run(c.pattern, func(t *testing.T) { - parsed, err := syntax.Parse(c.pattern, syntax.Perl) + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) require.NoError(t, err) matcher := stringMatcherFromRegexp(parsed) require.NotNil(t, matcher) - re := regexp.MustCompile("^" + c.pattern + "$") + re := regexp.MustCompile("^(?s:" + c.pattern + ")$") // Pre-condition check: ensure it contains zeroOrOneCharacterStringMatcher. numZeroOrOneMatchers := 0 @@ -1112,7 +1112,7 @@ func BenchmarkOptimizeEqualOrPrefixStringMatchers(b *testing.B) { } b.Logf("regexp: %s", re) - parsed, err := syntax.Parse(re, syntax.Perl) + parsed, err := syntax.Parse(re, syntax.Perl|syntax.DotNL) require.NoError(b, err) unoptimized := stringMatcherFromRegexpInternal(parsed) diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go index a880465969a..eb79f7be21c 100644 --- a/model/relabel/relabel.go +++ b/model/relabel/relabel.go @@ -171,7 +171,7 @@ type Regexp struct { // NewRegexp creates a new anchored Regexp and returns an error if the // passed-in regular expression does not compile. func NewRegexp(s string) (Regexp, error) { - regex, err := regexp.Compile("^(?:" + s + ")$") + regex, err := regexp.Compile("^(?s:" + s + ")$") return Regexp{Regexp: regex}, err } @@ -218,8 +218,8 @@ func (re Regexp) String() string { } str := re.Regexp.String() - // Trim the anchor `^(?:` prefix and `)$` suffix. - return str[4 : len(str)-2] + // Trim the anchor `^(?s:` prefix and `)$` suffix. + return str[5 : len(str)-2] } // Process returns a relabeled version of the given label set. The relabel configurations @@ -277,6 +277,13 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { return false } case Replace: + // Fast path to add or delete label pair. 
+ if val == "" && cfg.Regex == DefaultRelabelConfig.Regex && + !varInRegexTemplate(cfg.TargetLabel) && !varInRegexTemplate(cfg.Replacement) { + lb.Set(cfg.TargetLabel, cfg.Replacement) + break + } + indexes := cfg.Regex.FindStringSubmatchIndex(val) // If there is no match no replacement must take place. if indexes == nil { @@ -326,3 +333,7 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { return true } + +func varInRegexTemplate(template string) bool { + return strings.Contains(template, "$") +} diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index fc9952134d5..0c6d41f5e37 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -569,6 +569,29 @@ func TestRelabel(t *testing.T) { }, drop: true, }, + { + input: labels.FromMap(map[string]string{ + "a": "line1\nline2", + "b": "bar", + "c": "baz", + }), + relabel: []*Config{ + { + SourceLabels: model.LabelNames{"a"}, + Regex: MustNewRegexp("line1.*line2"), + TargetLabel: "d", + Separator: ";", + Replacement: "match${1}", + Action: Replace, + }, + }, + output: labels.FromMap(map[string]string{ + "a": "line1\nline2", + "b": "bar", + "c": "baz", + "d": "match", + }), + }, } for _, test := range tests { @@ -838,6 +861,34 @@ func BenchmarkRelabel(b *testing.B) { "__scrape_timeout__", "10s", "job", "kubernetes-pods"), }, + { + name: "static label pair", + config: ` + - replacement: wwwwww + target_label: wwwwww + - replacement: yyyyyyyyyyyy + target_label: xxxxxxxxx + - replacement: xxxxxxxxx + target_label: yyyyyyyyyyyy + - source_labels: ["something"] + target_label: with_source_labels + replacement: value + - replacement: dropped + target_label: ${0} + - replacement: ${0} + target_label: dropped`, + lbls: labels.FromStrings( + "abcdefg01", "hijklmn1", + "abcdefg02", "hijklmn2", + "abcdefg03", "hijklmn3", + "abcdefg04", "hijklmn4", + "abcdefg05", "hijklmn5", + "abcdefg06", "hijklmn6", + "abcdefg07", "hijklmn7", + "abcdefg08", "hijklmn8", + "job", "foo", + ), + }, } for i := range tests { err := yaml.UnmarshalStrict([]byte(tests[i].config), &tests[i].cfgs) diff --git a/model/rulefmt/rulefmt.go b/model/rulefmt/rulefmt.go index bfb85ce7405..ef6ac17fe3e 100644 --- a/model/rulefmt/rulefmt.go +++ b/model/rulefmt/rulefmt.go @@ -111,6 +111,20 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) { ) } + for k, v := range g.Labels { + if !model.LabelName(k).IsValid() || k == model.MetricNameLabel { + errs = append( + errs, fmt.Errorf("invalid label name: %s", k), + ) + } + + if !model.LabelValue(v).IsValid() { + errs = append( + errs, fmt.Errorf("invalid label value: %s", v), + ) + } + } + set[g.Name] = struct{}{} for i, r := range g.Rules { @@ -136,11 +150,12 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) { // RuleGroup is a list of sequentially evaluated recording and alerting rules. type RuleGroup struct { - Name string `yaml:"name"` - Interval model.Duration `yaml:"interval,omitempty"` - QueryOffset *model.Duration `yaml:"query_offset,omitempty"` - Limit int `yaml:"limit,omitempty"` - Rules []RuleNode `yaml:"rules"` + Name string `yaml:"name"` + Interval model.Duration `yaml:"interval,omitempty"` + QueryOffset *model.Duration `yaml:"query_offset,omitempty"` + Limit int `yaml:"limit,omitempty"` + Rules []RuleNode `yaml:"rules"` + Labels map[string]string `yaml:"labels,omitempty"` } // Rule describes an alerting or recording rule. 
diff --git a/model/rulefmt/rulefmt_test.go b/model/rulefmt/rulefmt_test.go index ef5008f4bf1..73ea1745949 100644 --- a/model/rulefmt/rulefmt_test.go +++ b/model/rulefmt/rulefmt_test.go @@ -85,9 +85,8 @@ func TestParseFileFailure(t *testing.T) { for _, c := range table { _, errs := ParseFile(filepath.Join("testdata", c.filename)) - require.NotNil(t, errs, "Expected error parsing %s but got none", c.filename) - require.Error(t, errs[0]) - require.Containsf(t, errs[0].Error(), c.errMsg, "Expected error for %s.", c.filename) + require.NotEmpty(t, errs, "Expected error parsing %s but got none", c.filename) + require.ErrorContainsf(t, errs[0], c.errMsg, "Expected error for %s.", c.filename) } } @@ -108,6 +107,23 @@ groups: severity: "page" annotations: summary: "Instance {{ $labels.instance }} down" +`, + shouldPass: true, + }, + { + ruleString: ` +groups: +- name: example + labels: + team: myteam + rules: + - alert: InstanceDown + expr: up == 0 + for: 5m + labels: + severity: "page" + annotations: + summary: "Instance {{ $labels.instance }} down" `, shouldPass: true, }, @@ -259,8 +275,7 @@ func TestError(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := tt.error.Error() - require.Equal(t, tt.want, got) + require.EqualError(t, tt.error, tt.want) }) } } @@ -308,8 +323,7 @@ func TestWrappedError(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := tt.wrappedError.Error() - require.Equal(t, tt.want, got) + require.EqualError(t, tt.wrappedError, tt.want) }) } } diff --git a/model/textparse/benchmark_test.go b/model/textparse/benchmark_test.go new file mode 100644 index 00000000000..bd0d5089aca --- /dev/null +++ b/model/textparse/benchmark_test.go @@ -0,0 +1,185 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package textparse + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "testing" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" + + "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" +) + +type newParser func([]byte, *labels.SymbolTable) Parser + +var newTestParserFns = map[string]newParser{ + "promtext": NewPromParser, + "promproto": func(b []byte, st *labels.SymbolTable) Parser { + return NewProtobufParser(b, true, st) + }, + "omtext": func(b []byte, st *labels.SymbolTable) Parser { + return NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped()) + }, + "omtext_with_nhcb": func(b []byte, st *labels.SymbolTable) Parser { + p := NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped()) + return NewNHCBParser(p, st, false) + }, +} + +// BenchmarkParse benchmarks parsing, mimicking how scrape/scrape.go#append use it. +// Typically used as follows: +/* + export bench=v1 && go test ./model/textparse/... 
\ + -run '^$' -bench '^BenchmarkParse' \ + -benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \ + | tee ${bench}.txt +*/ +// For profiles, add -memprofile=${bench}.mem.pprof -cpuprofile=${bench}.cpu.pprof +// options. +// +// NOTE(bwplotka): Previous iterations of this benchmark had different cases for isolated +// Series, Series+Metrics with and without reuse, Series+CT. Those cases are sometimes +// good to know if you are working on a certain optimization, but it does not +// make sense to persist such cases for everybody (e.g. for CI one day). +// For local iteration, feel free to adjust cases/comment out code etc. +// +// NOTE(bwplotka): Do not try to conclude "what parser (OM, proto, prom) is the fastest" +// as the testdata has different amount and type of metrics and features (e.g. exemplars). +func BenchmarkParse(b *testing.B) { + for _, bcase := range []struct { + dataFile string // Localized to "./testdata". + dataProto []byte + parser string + + compareToExpfmtFormat expfmt.FormatType + }{ + {dataFile: "promtestdata.txt", parser: "promtext", compareToExpfmtFormat: expfmt.TypeTextPlain}, + {dataFile: "promtestdata.nometa.txt", parser: "promtext", compareToExpfmtFormat: expfmt.TypeTextPlain}, + + // We don't pass compareToExpfmtFormat: expfmt.TypeProtoDelim as expfmt does not support GAUGE_HISTOGRAM, see https://github.com/prometheus/common/issues/430. + {dataProto: createTestProtoBuf(b).Bytes(), parser: "promproto"}, + + // We don't pass compareToExpfmtFormat: expfmt.TypeOpenMetrics as expfmt does not support OM exemplars, see https://github.com/prometheus/common/issues/703. + {dataFile: "omtestdata.txt", parser: "omtext"}, + {dataFile: "promtestdata.txt", parser: "omtext"}, // Compare how omtext parser deals with Prometheus text format vs promtext. + + // NHCB. + {dataFile: "omhistogramdata.txt", parser: "omtext"}, // Measure OM parser baseline for histograms. + {dataFile: "omhistogramdata.txt", parser: "omtext_with_nhcb"}, // Measure NHCB over OM parser. 
+ } { + var buf []byte + dataCase := bcase.dataFile + if len(bcase.dataProto) > 0 { + dataCase = "createTestProtoBuf()" + buf = bcase.dataProto + } else { + f, err := os.Open(filepath.Join("testdata", bcase.dataFile)) + require.NoError(b, err) + b.Cleanup(func() { + _ = f.Close() + }) + buf, err = io.ReadAll(f) + require.NoError(b, err) + } + b.Run(fmt.Sprintf("data=%v/parser=%v", dataCase, bcase.parser), func(b *testing.B) { + newParserFn := newTestParserFns[bcase.parser] + var ( + res labels.Labels + e exemplar.Exemplar + ) + + b.SetBytes(int64(len(buf))) + b.ReportAllocs() + b.ResetTimer() + + st := labels.NewSymbolTable() + for i := 0; i < b.N; i++ { + p := newParserFn(buf, st) + + Inner: + for { + t, err := p.Next() + switch t { + case EntryInvalid: + if errors.Is(err, io.EOF) { + break Inner + } + b.Fatal(err) + case EntryType: + _, _ = p.Type() + continue + case EntryHelp: + _, _ = p.Help() + continue + case EntryUnit: + _, _ = p.Unit() + continue + case EntryComment: + continue + case EntryHistogram: + _, _, _, _ = p.Histogram() + case EntrySeries: + _, _, _ = p.Series() + default: + b.Fatal("not implemented entry", t) + } + + _ = p.Metric(&res) + _ = p.CreatedTimestamp() + for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) { + } + } + } + }) + + b.Run(fmt.Sprintf("data=%v/parser=xpfmt", dataCase), func(b *testing.B) { + if bcase.compareToExpfmtFormat == expfmt.TypeUnknown { + b.Skip("compareToExpfmtFormat not set") + } + + b.SetBytes(int64(len(buf))) + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + decSamples := make(model.Vector, 0, 50) + sdec := expfmt.SampleDecoder{ + Dec: expfmt.NewDecoder(bytes.NewReader(buf), expfmt.NewFormat(bcase.compareToExpfmtFormat)), + Opts: &expfmt.DecodeOptions{ + Timestamp: model.TimeFromUnixNano(0), + }, + } + + for { + if err := sdec.Decode(&decSamples); err != nil { + if errors.Is(err, io.EOF) { + break + } + b.Fatal(err) + } + decSamples = decSamples[:0] + } + } + }) + } +} diff --git a/model/textparse/interface.go b/model/textparse/interface.go index 0b5d9281e4d..26828552819 100644 --- a/model/textparse/interface.go +++ b/model/textparse/interface.go @@ -14,6 +14,8 @@ package textparse import ( + "errors" + "fmt" "mime" "github.com/prometheus/common/model" @@ -23,8 +25,7 @@ import ( "github.com/prometheus/prometheus/model/labels" ) -// Parser parses samples from a byte slice of samples in the official -// Prometheus and OpenMetrics text exposition formats. +// Parser parses samples from a byte slice of samples in different exposition formats. type Parser interface { // Series returns the bytes of a series with a simple float64 as a // value, the timestamp if set, and the value of the current sample. @@ -58,6 +59,8 @@ type Parser interface { // Metric writes the labels of the current sample into the passed labels. // It returns the string from which the metric was parsed. + // The values of the "le" labels of classic histograms and "quantile" labels + // of summaries should follow the OpenMetrics formatting rules. Metric(l *labels.Labels) string // Exemplar writes the exemplar of the current sample into the passed @@ -69,6 +72,8 @@ type Parser interface { // CreatedTimestamp returns the created timestamp (in milliseconds) for the // current sample. It returns nil if it is unknown e.g. if it wasn't set, // if the scrape protocol or metric type does not support created timestamps. + // Assume the CreatedTimestamp returned pointer is only valid until + // the Next iteration. 
CreatedTimestamp() *int64 // Next advances the parser to the next sample. @@ -76,26 +81,65 @@ type Parser interface { Next() (Entry, error) } -// New returns a new parser of the byte slice. -// -// This function always returns a valid parser, but might additionally -// return an error if the content type cannot be parsed. -func New(b []byte, contentType string, parseClassicHistograms bool, st *labels.SymbolTable) (Parser, error) { +// extractMediaType returns the mediaType of a required parser. It tries first to +// extract a valid and supported mediaType from contentType. If that fails, +// the provided fallbackType (possibly an empty string) is returned, together with +// an error. fallbackType is used as-is without further validation. +func extractMediaType(contentType, fallbackType string) (string, error) { if contentType == "" { - return NewPromParser(b, st), nil + if fallbackType == "" { + return "", errors.New("non-compliant scrape target sending blank Content-Type and no fallback_scrape_protocol specified for target") + } + return fallbackType, fmt.Errorf("non-compliant scrape target sending blank Content-Type, using fallback_scrape_protocol %q", fallbackType) } + // We have a contentType, parse it. mediaType, _, err := mime.ParseMediaType(contentType) if err != nil { - return NewPromParser(b, st), err + if fallbackType == "" { + retErr := fmt.Errorf("cannot parse Content-Type %q and no fallback_scrape_protocol for target", contentType) + return "", errors.Join(retErr, err) + } + retErr := fmt.Errorf("could not parse received Content-Type %q, using fallback_scrape_protocol %q", contentType, fallbackType) + return fallbackType, errors.Join(retErr, err) + } + + // We have a valid media type, either we recognise it and can use it + // or we have to error. + switch mediaType { + case "application/openmetrics-text", "application/vnd.google.protobuf", "text/plain": + return mediaType, nil + } + // We're here because we have no recognised mediaType. + if fallbackType == "" { + return "", fmt.Errorf("received unsupported Content-Type %q and no fallback_scrape_protocol specified for target", contentType) } + return fallbackType, fmt.Errorf("received unsupported Content-Type %q, using fallback_scrape_protocol %q", contentType, fallbackType) +} + +// New returns a new parser of the byte slice. +// +// This function no longer guarantees to return a valid parser. +// +// It only returns a valid parser if the supplied contentType and fallbackType allow. +// An error may also be returned if fallbackType had to be used or there was some +// other error parsing the supplied Content-Type. +// If the returned parser is nil then the scrape must fail. +func New(b []byte, contentType, fallbackType string, parseClassicHistograms, skipOMCTSeries bool, st *labels.SymbolTable) (Parser, error) { + mediaType, err := extractMediaType(contentType, fallbackType) + // err may be nil or something we want to warn about. 
+ switch mediaType { case "application/openmetrics-text": - return NewOpenMetricsParser(b, st), nil + return NewOpenMetricsParser(b, st, func(o *openMetricsParserOptions) { + o.SkipCTSeries = skipOMCTSeries + }), err case "application/vnd.google.protobuf": - return NewProtobufParser(b, parseClassicHistograms, st), nil + return NewProtobufParser(b, parseClassicHistograms, st), err + case "text/plain": + return NewPromParser(b, st), err default: - return NewPromParser(b, st), nil + return nil, err } } diff --git a/model/textparse/interface_test.go b/model/textparse/interface_test.go index c644565628c..72c8284f2d1 100644 --- a/model/textparse/interface_test.go +++ b/model/textparse/interface_test.go @@ -14,16 +14,28 @@ package textparse import ( + "errors" + "io" "testing" + "github.com/google/go-cmp/cmp" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/util/testutil" ) func TestNewParser(t *testing.T) { t.Parallel() + requireNilParser := func(t *testing.T, p Parser) { + require.Nil(t, p) + } + requirePromParser := func(t *testing.T, p Parser) { require.NotNil(t, p) _, ok := p.(*PromParser) @@ -36,34 +48,83 @@ func TestNewParser(t *testing.T) { require.True(t, ok) } + requireProtobufParser := func(t *testing.T, p Parser) { + require.NotNil(t, p) + _, ok := p.(*ProtobufParser) + require.True(t, ok) + } + for name, tt := range map[string]*struct { - contentType string - validateParser func(*testing.T, Parser) - err string + contentType string + fallbackScrapeProtocol config.ScrapeProtocol + validateParser func(*testing.T, Parser) + err string }{ "empty-string": { - validateParser: requirePromParser, + validateParser: requireNilParser, + err: "non-compliant scrape target sending blank Content-Type and no fallback_scrape_protocol specified for target", + }, + "empty-string-fallback-text-plain": { + validateParser: requirePromParser, + fallbackScrapeProtocol: config.PrometheusText0_0_4, + err: "non-compliant scrape target sending blank Content-Type, using fallback_scrape_protocol \"text/plain\"", }, "invalid-content-type-1": { contentType: "invalid/", - validateParser: requirePromParser, + validateParser: requireNilParser, err: "expected token after slash", }, + "invalid-content-type-1-fallback-text-plain": { + contentType: "invalid/", + validateParser: requirePromParser, + fallbackScrapeProtocol: config.PrometheusText0_0_4, + err: "expected token after slash", + }, + "invalid-content-type-1-fallback-openmetrics": { + contentType: "invalid/", + validateParser: requireOpenMetricsParser, + fallbackScrapeProtocol: config.OpenMetricsText0_0_1, + err: "expected token after slash", + }, + "invalid-content-type-1-fallback-protobuf": { + contentType: "invalid/", + validateParser: requireProtobufParser, + fallbackScrapeProtocol: config.PrometheusProto, + err: "expected token after slash", + }, "invalid-content-type-2": { contentType: "invalid/invalid/invalid", - validateParser: requirePromParser, + validateParser: requireNilParser, err: "unexpected content after media subtype", }, + "invalid-content-type-2-fallback-text-plain": { + contentType: "invalid/invalid/invalid", + validateParser: requirePromParser, + fallbackScrapeProtocol: config.PrometheusText1_0_0, + err: "unexpected content after media subtype", + }, "invalid-content-type-3": { 
contentType: "/", - validateParser: requirePromParser, + validateParser: requireNilParser, err: "no media type", }, + "invalid-content-type-3-fallback-text-plain": { + contentType: "/", + validateParser: requirePromParser, + fallbackScrapeProtocol: config.PrometheusText1_0_0, + err: "no media type", + }, "invalid-content-type-4": { contentType: "application/openmetrics-text; charset=UTF-8; charset=utf-8", - validateParser: requirePromParser, + validateParser: requireNilParser, err: "duplicate parameter name", }, + "invalid-content-type-4-fallback-open-metrics": { + contentType: "application/openmetrics-text; charset=UTF-8; charset=utf-8", + validateParser: requireOpenMetricsParser, + fallbackScrapeProtocol: config.OpenMetricsText1_0_0, + err: "duplicate parameter name", + }, "openmetrics": { contentType: "application/openmetrics-text", validateParser: requireOpenMetricsParser, @@ -80,27 +141,129 @@ func TestNewParser(t *testing.T) { contentType: "text/plain", validateParser: requirePromParser, }, + "protobuf": { + contentType: "application/vnd.google.protobuf", + validateParser: requireProtobufParser, + }, "plain-text-with-version": { contentType: "text/plain; version=0.0.4", validateParser: requirePromParser, }, "some-other-valid-content-type": { contentType: "text/html", - validateParser: requirePromParser, + validateParser: requireNilParser, + err: "received unsupported Content-Type \"text/html\" and no fallback_scrape_protocol specified for target", + }, + "some-other-valid-content-type-fallback-text-plain": { + contentType: "text/html", + validateParser: requirePromParser, + fallbackScrapeProtocol: config.PrometheusText0_0_4, + err: "received unsupported Content-Type \"text/html\", using fallback_scrape_protocol \"text/plain\"", }, } { t.Run(name, func(t *testing.T) { tt := tt // Copy to local variable before going parallel. t.Parallel() - p, err := New([]byte{}, tt.contentType, false, labels.NewSymbolTable()) + fallbackProtoMediaType := tt.fallbackScrapeProtocol.HeaderMediaType() + + p, err := New([]byte{}, tt.contentType, fallbackProtoMediaType, false, false, labels.NewSymbolTable()) tt.validateParser(t, p) if tt.err == "" { require.NoError(t, err) } else { - require.Error(t, err) - require.Contains(t, err.Error(), tt.err) + require.ErrorContains(t, err, tt.err) } }) } } + +// parsedEntry represents data that is parsed for each entry. +type parsedEntry struct { + // In all but EntryComment, EntryInvalid. + m string + + // In EntryHistogram. + shs *histogram.Histogram + fhs *histogram.FloatHistogram + + // In EntrySeries. + v float64 + + // In EntrySeries and EntryHistogram. + lset labels.Labels + t *int64 + es []exemplar.Exemplar + ct *int64 + + // In EntryType. + typ model.MetricType + // In EntryHelp. + help string + // In EntryUnit. + unit string + // In EntryComment. 
+ comment string +} + +func requireEntries(t *testing.T, exp, got []parsedEntry) { + t.Helper() + + testutil.RequireEqualWithOptions(t, exp, got, []cmp.Option{ + cmp.AllowUnexported(parsedEntry{}), + }) +} + +func testParse(t *testing.T, p Parser) (ret []parsedEntry) { + t.Helper() + + for { + et, err := p.Next() + if errors.Is(err, io.EOF) { + break + } + require.NoError(t, err) + + var got parsedEntry + var m []byte + switch et { + case EntryInvalid: + t.Fatal("entry invalid not expected") + case EntrySeries, EntryHistogram: + if et == EntrySeries { + m, got.t, got.v = p.Series() + got.m = string(m) + } else { + m, got.t, got.shs, got.fhs = p.Histogram() + got.m = string(m) + } + + p.Metric(&got.lset) + // Parser reuses int pointer. + if ct := p.CreatedTimestamp(); ct != nil { + got.ct = int64p(*ct) + } + for e := (exemplar.Exemplar{}); p.Exemplar(&e); { + got.es = append(got.es, e) + } + case EntryType: + m, got.typ = p.Type() + got.m = string(m) + + case EntryHelp: + m, h := p.Help() + got.m = string(m) + got.help = string(h) + + case EntryUnit: + m, u := p.Unit() + got.m = string(m) + got.unit = string(u) + + case EntryComment: + got.comment = string(p.Comment()) + } + ret = append(ret, got) + } + return ret +} diff --git a/model/textparse/nhcbparse.go b/model/textparse/nhcbparse.go new file mode 100644 index 00000000000..d019c327c37 --- /dev/null +++ b/model/textparse/nhcbparse.go @@ -0,0 +1,376 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package textparse + +import ( + "errors" + "io" + "math" + "strconv" + "strings" + + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/util/convertnhcb" +) + +type collectionState int + +const ( + stateStart collectionState = iota + stateCollecting + stateEmitting +) + +// The NHCBParser wraps a Parser and converts classic histograms to native +// histograms with custom buckets. +// +// Since Parser interface is line based, this parser needs to keep track +// of the last classic histogram series it saw to collate them into a +// single native histogram. +// +// Note: +// - Only series that have the histogram metadata type are considered for +// conversion. +// - The classic series are also returned if keepClassicHistograms is true. +type NHCBParser struct { + // The parser we're wrapping. + parser Parser + // Option to keep classic histograms along with converted histograms. + keepClassicHistograms bool + + // Labels builder. + builder labels.ScratchBuilder + + // State of the parser. + state collectionState + + // Caches the values from the underlying parser. + // For Series and Histogram. + bytes []byte + ts *int64 + value float64 + h *histogram.Histogram + fh *histogram.FloatHistogram + // For Metric. + lset labels.Labels + metricString string + // For Type. 
+ bName []byte + typ model.MetricType + + // Caches the entry itself if we are inserting a converted NHCB + // halfway through. + entry Entry + err error + + // Caches the values and metric for the inserted converted NHCB. + bytesNHCB []byte + hNHCB *histogram.Histogram + fhNHCB *histogram.FloatHistogram + lsetNHCB labels.Labels + exemplars []exemplar.Exemplar + ctNHCB *int64 + metricStringNHCB string + + // Collates values from the classic histogram series to build + // the converted histogram later. + tempLsetNHCB labels.Labels + tempNHCB convertnhcb.TempHistogram + tempExemplars []exemplar.Exemplar + tempExemplarCount int + tempCT *int64 + + // Remembers the last base histogram metric name (assuming it's + // a classic histogram) so we can tell if the next float series + // is part of the same classic histogram. + lastHistogramName string + lastHistogramLabelsHash uint64 + lastHistogramExponential bool + // Reused buffer for hashing labels. + hBuffer []byte +} + +func NewNHCBParser(p Parser, st *labels.SymbolTable, keepClassicHistograms bool) Parser { + return &NHCBParser{ + parser: p, + keepClassicHistograms: keepClassicHistograms, + builder: labels.NewScratchBuilderWithSymbolTable(st, 16), + tempNHCB: convertnhcb.NewTempHistogram(), + } +} + +func (p *NHCBParser) Series() ([]byte, *int64, float64) { + return p.bytes, p.ts, p.value +} + +func (p *NHCBParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { + if p.state == stateEmitting { + return p.bytesNHCB, p.ts, p.hNHCB, p.fhNHCB + } + return p.bytes, p.ts, p.h, p.fh +} + +func (p *NHCBParser) Help() ([]byte, []byte) { + return p.parser.Help() +} + +func (p *NHCBParser) Type() ([]byte, model.MetricType) { + return p.bName, p.typ +} + +func (p *NHCBParser) Unit() ([]byte, []byte) { + return p.parser.Unit() +} + +func (p *NHCBParser) Comment() []byte { + return p.parser.Comment() +} + +func (p *NHCBParser) Metric(l *labels.Labels) string { + if p.state == stateEmitting { + *l = p.lsetNHCB + return p.metricStringNHCB + } + *l = p.lset + return p.metricString +} + +func (p *NHCBParser) Exemplar(ex *exemplar.Exemplar) bool { + if p.state == stateEmitting { + if len(p.exemplars) == 0 { + return false + } + *ex = p.exemplars[0] + p.exemplars = p.exemplars[1:] + return true + } + return p.parser.Exemplar(ex) +} + +func (p *NHCBParser) CreatedTimestamp() *int64 { + switch p.state { + case stateStart: + if p.entry == EntrySeries || p.entry == EntryHistogram { + return p.parser.CreatedTimestamp() + } + case stateCollecting: + return p.tempCT + case stateEmitting: + return p.ctNHCB + } + return nil +} + +func (p *NHCBParser) Next() (Entry, error) { + if p.state == stateEmitting { + p.state = stateStart + if p.entry == EntrySeries { + isNHCB := p.handleClassicHistogramSeries(p.lset) + if isNHCB && !p.keepClassicHistograms { + // Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms. + return p.Next() + } + } + return p.entry, p.err + } + + p.entry, p.err = p.parser.Next() + if p.err != nil { + if errors.Is(p.err, io.EOF) && p.processNHCB() { + return EntryHistogram, nil + } + return EntryInvalid, p.err + } + switch p.entry { + case EntrySeries: + p.bytes, p.ts, p.value = p.parser.Series() + p.metricString = p.parser.Metric(&p.lset) + // Check the label set to see if we can continue or need to emit the NHCB. + var isNHCB bool + if p.compareLabels() { + // Labels differ. Check if we can emit the NHCB. 
+ if p.processNHCB() { + return EntryHistogram, nil + } + isNHCB = p.handleClassicHistogramSeries(p.lset) + } else { + // Labels are the same. Check if after an exponential histogram. + if p.lastHistogramExponential { + isNHCB = false + } else { + isNHCB = p.handleClassicHistogramSeries(p.lset) + } + } + if isNHCB && !p.keepClassicHistograms { + // Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms. + return p.Next() + } + return p.entry, p.err + case EntryHistogram: + p.bytes, p.ts, p.h, p.fh = p.parser.Histogram() + p.metricString = p.parser.Metric(&p.lset) + p.storeExponentialLabels() + case EntryType: + p.bName, p.typ = p.parser.Type() + } + if p.processNHCB() { + return EntryHistogram, nil + } + return p.entry, p.err +} + +// Return true if labels have changed and we should emit the NHCB. +func (p *NHCBParser) compareLabels() bool { + if p.state != stateCollecting { + return false + } + if p.typ != model.MetricTypeHistogram { + // Different metric type. + return true + } + if p.lastHistogramName != convertnhcb.GetHistogramMetricBaseName(p.lset.Get(labels.MetricName)) { + // Different metric name. + return true + } + nextHash, _ := p.lset.HashWithoutLabels(p.hBuffer, labels.BucketLabel) + // Different label values. + return p.lastHistogramLabelsHash != nextHash +} + +// Save the label set of the classic histogram without suffix and bucket `le` label. +func (p *NHCBParser) storeClassicLabels() { + p.lastHistogramName = convertnhcb.GetHistogramMetricBaseName(p.lset.Get(labels.MetricName)) + p.lastHistogramLabelsHash, _ = p.lset.HashWithoutLabels(p.hBuffer, labels.BucketLabel) + p.lastHistogramExponential = false +} + +func (p *NHCBParser) storeExponentialLabels() { + p.lastHistogramName = p.lset.Get(labels.MetricName) + p.lastHistogramLabelsHash, _ = p.lset.HashWithoutLabels(p.hBuffer) + p.lastHistogramExponential = true +} + +// handleClassicHistogramSeries collates the classic histogram series to be converted to NHCB +// if it is actually a classic histogram series (and not a normal float series) and if there +// isn't already a native histogram with the same name (assuming it is always processed +// right before the classic histograms) and returns true if the collation was done. +func (p *NHCBParser) handleClassicHistogramSeries(lset labels.Labels) bool { + if p.typ != model.MetricTypeHistogram { + return false + } + mName := lset.Get(labels.MetricName) + // Sanity check to ensure that the TYPE metadata entry name is the same as the base name. 
+ if convertnhcb.GetHistogramMetricBaseName(mName) != string(p.bName) { + return false + } + switch { + case strings.HasSuffix(mName, "_bucket") && lset.Has(labels.BucketLabel): + le, err := strconv.ParseFloat(lset.Get(labels.BucketLabel), 64) + if err == nil && !math.IsNaN(le) { + p.processClassicHistogramSeries(lset, "_bucket", func(hist *convertnhcb.TempHistogram) { + hist.BucketCounts[le] = p.value + }) + return true + } + case strings.HasSuffix(mName, "_count"): + p.processClassicHistogramSeries(lset, "_count", func(hist *convertnhcb.TempHistogram) { + hist.Count = p.value + }) + return true + case strings.HasSuffix(mName, "_sum"): + p.processClassicHistogramSeries(lset, "_sum", func(hist *convertnhcb.TempHistogram) { + hist.Sum = p.value + }) + return true + } + return false +} + +func (p *NHCBParser) processClassicHistogramSeries(lset labels.Labels, suffix string, updateHist func(*convertnhcb.TempHistogram)) { + if p.state != stateCollecting { + p.storeClassicLabels() + p.tempCT = p.parser.CreatedTimestamp() + p.state = stateCollecting + } + p.tempLsetNHCB = convertnhcb.GetHistogramMetricBase(lset, suffix) + p.storeExemplars() + updateHist(&p.tempNHCB) +} + +func (p *NHCBParser) storeExemplars() { + for ex := p.nextExemplarPtr(); p.parser.Exemplar(ex); ex = p.nextExemplarPtr() { + p.tempExemplarCount++ + } +} + +func (p *NHCBParser) nextExemplarPtr() *exemplar.Exemplar { + switch { + case p.tempExemplarCount == len(p.tempExemplars)-1: + // Reuse the previously allocated exemplar, it was not filled up. + case len(p.tempExemplars) == cap(p.tempExemplars): + // Let the runtime grow the slice. + p.tempExemplars = append(p.tempExemplars, exemplar.Exemplar{}) + default: + // Take the next element into use. + p.tempExemplars = p.tempExemplars[:len(p.tempExemplars)+1] + } + return &p.tempExemplars[len(p.tempExemplars)-1] +} + +func (p *NHCBParser) swapExemplars() { + p.exemplars = p.tempExemplars[:p.tempExemplarCount] + p.tempExemplars = p.tempExemplars[:0] + p.tempExemplarCount = 0 +} + +// processNHCB converts the collated classic histogram series to NHCB and caches the info +// to be returned to callers. Retruns true if the conversion was successful. 
+func (p *NHCBParser) processNHCB() bool { + if p.state != stateCollecting { + return false + } + ub := make([]float64, 0, len(p.tempNHCB.BucketCounts)) + for b := range p.tempNHCB.BucketCounts { + ub = append(ub, b) + } + upperBounds, hBase := convertnhcb.ProcessUpperBoundsAndCreateBaseHistogram(ub, false) + fhBase := hBase.ToFloat(nil) + h, fh := convertnhcb.NewHistogram(p.tempNHCB, upperBounds, hBase, fhBase) + if h != nil { + if err := h.Validate(); err != nil { + return false + } + p.hNHCB = h + p.fhNHCB = nil + } else if fh != nil { + if err := fh.Validate(); err != nil { + return false + } + p.hNHCB = nil + p.fhNHCB = fh + } + p.metricStringNHCB = p.tempLsetNHCB.Get(labels.MetricName) + strings.ReplaceAll(p.tempLsetNHCB.DropMetricName().String(), ", ", ",") + p.bytesNHCB = []byte(p.metricStringNHCB) + p.lsetNHCB = p.tempLsetNHCB + p.swapExemplars() + p.ctNHCB = p.tempCT + p.tempNHCB = convertnhcb.NewTempHistogram() + p.state = stateEmitting + p.tempCT = nil + return true +} diff --git a/model/textparse/nhcbparse_test.go b/model/textparse/nhcbparse_test.go new file mode 100644 index 00000000000..6152a850385 --- /dev/null +++ b/model/textparse/nhcbparse_test.go @@ -0,0 +1,939 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package textparse + +import ( + "bytes" + "encoding/binary" + "strconv" + "testing" + + "github.com/gogo/protobuf/proto" + "github.com/stretchr/testify/require" + + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" +) + +func TestNHCBParserOnOMParser(t *testing.T) { + // The input is taken originally from TestOpenMetricsParse, with additional tests for the NHCBParser. + + input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations. +# TYPE go_gc_duration_seconds summary +# UNIT go_gc_duration_seconds seconds +go_gc_duration_seconds{quantile="0"} 4.9351e-05 +go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05 +go_gc_duration_seconds{quantile="0.5",a="b"} 8.3835e-05 +# HELP nohelp1 +# HELP help2 escape \ \n \\ \" \x chars +# UNIT nounit +go_gc_duration_seconds{quantile="1.0",a="b"} 8.3835e-05 +go_gc_duration_seconds_count 99 +some:aggregate:rate5m{a_b="c"} 1 +# HELP go_goroutines Number of goroutines that currently exist. 
+# TYPE go_goroutines gauge +go_goroutines 33 123.123 +# TYPE hh histogram +hh_bucket{le="+Inf"} 1 +# TYPE gh gaugehistogram +gh_bucket{le="+Inf"} 1 +# TYPE hhh histogram +hhh_bucket{le="+Inf"} 1 # {id="histogram-bucket-test"} 4 +hhh_count 1 # {id="histogram-count-test"} 4 +# TYPE ggh gaugehistogram +ggh_bucket{le="+Inf"} 1 # {id="gaugehistogram-bucket-test",xx="yy"} 4 123.123 +ggh_count 1 # {id="gaugehistogram-count-test",xx="yy"} 4 123.123 +# TYPE smr_seconds summary +smr_seconds_count 2.0 # {id="summary-count-test"} 1 123.321 +smr_seconds_sum 42.0 # {id="summary-sum-test"} 1 123.321 +# TYPE ii info +ii{foo="bar"} 1 +# TYPE ss stateset +ss{ss="foo"} 1 +ss{ss="bar"} 0 +ss{A="a"} 0 +# TYPE un unknown +_metric_starting_with_underscore 1 +testmetric{_label_starting_with_underscore="foo"} 1 +testmetric{label="\"bar\""} 1 +# HELP foo Counter with and without labels to certify CT is parsed for both cases +# TYPE foo counter +foo_total 17.0 1520879607.789 # {id="counter-test"} 5 +foo_created 1520872607.123 +foo_total{a="b"} 17.0 1520879607.789 # {id="counter-test"} 5 +foo_created{a="b"} 1520872607.123 +# HELP bar Summary with CT at the end, making sure we find CT even if it's multiple lines a far +# TYPE bar summary +bar_count 17.0 +bar_sum 324789.3 +bar{quantile="0.95"} 123.7 +bar{quantile="0.99"} 150.0 +bar_created 1520872608.124 +# HELP baz Histogram with the same objective as above's summary +# TYPE baz histogram +baz_bucket{le="0.0"} 0 +baz_bucket{le="+Inf"} 17 +baz_count 17 +baz_sum 324789.3 +baz_created 1520872609.125 +# HELP fizz_created Gauge which shouldn't be parsed as CT +# TYPE fizz_created gauge +fizz_created 17.0 +# HELP something Histogram with _created between buckets and summary +# TYPE something histogram +something_count 18 +something_sum 324789.4 +something_created 1520430001 +something_bucket{le="0.0"} 1 +something_bucket{le="+Inf"} 18 +something_count{a="b"} 9 +something_sum{a="b"} 42123.0 +something_bucket{a="b",le="0.0"} 8 +something_bucket{a="b",le="+Inf"} 9 +something_created{a="b"} 1520430002 +# HELP yum Summary with _created between sum and quantiles +# TYPE yum summary +yum_count 20 +yum_sum 324789.5 +yum_created 1520430003 +yum{quantile="0.95"} 123.7 +yum{quantile="0.99"} 150.0 +# HELP foobar Summary with _created as the first line +# TYPE foobar summary +foobar_count 21 +foobar_created 1520430004 +foobar_sum 324789.6 +foobar{quantile="0.95"} 123.8 +foobar{quantile="0.99"} 150.1` + + input += "\n# HELP metric foo\x00bar" + input += "\nnull_byte_metric{a=\"abc\x00\"} 1" + input += "\n# EOF\n" + + exp := []parsedEntry{ + { + m: "go_gc_duration_seconds", + help: "A summary of the GC invocation durations.", + }, { + m: "go_gc_duration_seconds", + typ: model.MetricTypeSummary, + }, { + m: "go_gc_duration_seconds", + unit: "seconds", + }, { + m: `go_gc_duration_seconds{quantile="0"}`, + v: 4.9351e-05, + lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.0"), + }, { + m: `go_gc_duration_seconds{quantile="0.25"}`, + v: 7.424100000000001e-05, + lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.25"), + }, { + m: `go_gc_duration_seconds{quantile="0.5",a="b"}`, + v: 8.3835e-05, + lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.5", "a", "b"), + }, { + m: "nohelp1", + help: "", + }, { + m: "help2", + help: "escape \\ \n \\ \" \\x chars", + }, { + m: "nounit", + unit: "", + }, { + m: `go_gc_duration_seconds{quantile="1.0",a="b"}`, + v: 8.3835e-05, + lset: labels.FromStrings("__name__", 
"go_gc_duration_seconds", "quantile", "1.0", "a", "b"), + }, { + m: `go_gc_duration_seconds_count`, + v: 99, + lset: labels.FromStrings("__name__", "go_gc_duration_seconds_count"), + }, { + m: `some:aggregate:rate5m{a_b="c"}`, + v: 1, + lset: labels.FromStrings("__name__", "some:aggregate:rate5m", "a_b", "c"), + }, { + m: "go_goroutines", + help: "Number of goroutines that currently exist.", + }, { + m: "go_goroutines", + typ: model.MetricTypeGauge, + }, { + m: `go_goroutines`, + v: 33, + t: int64p(123123), + lset: labels.FromStrings("__name__", "go_goroutines"), + }, { + m: "hh", + typ: model.MetricTypeHistogram, + }, { + m: `hh{}`, + shs: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 1, + Sum: 0.0, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{1}, + // Custom values are empty as we do not store the +Inf boundary. + }, + lset: labels.FromStrings("__name__", "hh"), + }, { + m: "gh", + typ: model.MetricTypeGaugeHistogram, + }, { + m: `gh_bucket{le="+Inf"}`, + v: 1, + lset: labels.FromStrings("__name__", "gh_bucket", "le", "+Inf"), + }, { + m: "hhh", + typ: model.MetricTypeHistogram, + }, { + m: `hhh{}`, + shs: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 1, + Sum: 0.0, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{1}, + // Custom values are empty as we do not store the +Inf boundary. + }, + lset: labels.FromStrings("__name__", "hhh"), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "histogram-bucket-test"), Value: 4}, + {Labels: labels.FromStrings("id", "histogram-count-test"), Value: 4}, + }, + }, { + m: "ggh", + typ: model.MetricTypeGaugeHistogram, + }, { + m: `ggh_bucket{le="+Inf"}`, + v: 1, + lset: labels.FromStrings("__name__", "ggh_bucket", "le", "+Inf"), + es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "gaugehistogram-bucket-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}}, + }, { + m: `ggh_count`, + v: 1, + lset: labels.FromStrings("__name__", "ggh_count"), + es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "gaugehistogram-count-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}}, + }, { + m: "smr_seconds", + typ: model.MetricTypeSummary, + }, { + m: `smr_seconds_count`, + v: 2, + lset: labels.FromStrings("__name__", "smr_seconds_count"), + es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "summary-count-test"), Value: 1, HasTs: true, Ts: 123321}}, + }, { + m: `smr_seconds_sum`, + v: 42, + lset: labels.FromStrings("__name__", "smr_seconds_sum"), + es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "summary-sum-test"), Value: 1, HasTs: true, Ts: 123321}}, + }, { + m: "ii", + typ: model.MetricTypeInfo, + }, { + m: `ii{foo="bar"}`, + v: 1, + lset: labels.FromStrings("__name__", "ii", "foo", "bar"), + }, { + m: "ss", + typ: model.MetricTypeStateset, + }, { + m: `ss{ss="foo"}`, + v: 1, + lset: labels.FromStrings("__name__", "ss", "ss", "foo"), + }, { + m: `ss{ss="bar"}`, + v: 0, + lset: labels.FromStrings("__name__", "ss", "ss", "bar"), + }, { + m: `ss{A="a"}`, + v: 0, + lset: labels.FromStrings("A", "a", "__name__", "ss"), + }, { + m: "un", + typ: model.MetricTypeUnknown, + }, { + m: "_metric_starting_with_underscore", + v: 1, + lset: labels.FromStrings("__name__", "_metric_starting_with_underscore"), + }, { + m: "testmetric{_label_starting_with_underscore=\"foo\"}", + v: 1, + lset: labels.FromStrings("__name__", "testmetric", "_label_starting_with_underscore", "foo"), + }, { + m: 
"testmetric{label=\"\\\"bar\\\"\"}", + v: 1, + lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`), + }, { + m: "foo", + help: "Counter with and without labels to certify CT is parsed for both cases", + }, { + m: "foo", + typ: model.MetricTypeCounter, + }, { + m: "foo_total", + v: 17, + lset: labels.FromStrings("__name__", "foo_total"), + t: int64p(1520879607789), + es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "counter-test"), Value: 5}}, + ct: int64p(1520872607123), + }, { + m: `foo_total{a="b"}`, + v: 17.0, + lset: labels.FromStrings("__name__", "foo_total", "a", "b"), + t: int64p(1520879607789), + es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "counter-test"), Value: 5}}, + ct: int64p(1520872607123), + }, { + m: "bar", + help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far", + }, { + m: "bar", + typ: model.MetricTypeSummary, + }, { + m: "bar_count", + v: 17.0, + lset: labels.FromStrings("__name__", "bar_count"), + ct: int64p(1520872608124), + }, { + m: "bar_sum", + v: 324789.3, + lset: labels.FromStrings("__name__", "bar_sum"), + ct: int64p(1520872608124), + }, { + m: `bar{quantile="0.95"}`, + v: 123.7, + lset: labels.FromStrings("__name__", "bar", "quantile", "0.95"), + ct: int64p(1520872608124), + }, { + m: `bar{quantile="0.99"}`, + v: 150.0, + lset: labels.FromStrings("__name__", "bar", "quantile", "0.99"), + ct: int64p(1520872608124), + }, { + m: "baz", + help: "Histogram with the same objective as above's summary", + }, { + m: "baz", + typ: model.MetricTypeHistogram, + }, { + m: `baz{}`, + shs: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 17, + Sum: 324789.3, + PositiveSpans: []histogram.Span{{Offset: 1, Length: 1}}, // The first bucket has 0 count so we don't store it and Offset is 1. + PositiveBuckets: []int64{17}, + CustomValues: []float64{0.0}, // We do not store the +Inf boundary. + }, + lset: labels.FromStrings("__name__", "baz"), + ct: int64p(1520872609125), + }, { + m: "fizz_created", + help: "Gauge which shouldn't be parsed as CT", + }, { + m: "fizz_created", + typ: model.MetricTypeGauge, + }, { + m: `fizz_created`, + v: 17, + lset: labels.FromStrings("__name__", "fizz_created"), + }, { + m: "something", + help: "Histogram with _created between buckets and summary", + }, { + m: "something", + typ: model.MetricTypeHistogram, + }, { + m: `something{}`, + shs: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 18, + Sum: 324789.4, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, + PositiveBuckets: []int64{1, 16}, + CustomValues: []float64{0.0}, // We do not store the +Inf boundary. + }, + lset: labels.FromStrings("__name__", "something"), + ct: int64p(1520430001000), + }, { + m: `something{a="b"}`, + shs: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 9, + Sum: 42123.0, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, + PositiveBuckets: []int64{8, -7}, + CustomValues: []float64{0.0}, // We do not store the +Inf boundary. 
+ }, + lset: labels.FromStrings("__name__", "something", "a", "b"), + ct: int64p(1520430002000), + }, { + m: "yum", + help: "Summary with _created between sum and quantiles", + }, { + m: "yum", + typ: model.MetricTypeSummary, + }, { + m: `yum_count`, + v: 20, + lset: labels.FromStrings("__name__", "yum_count"), + ct: int64p(1520430003000), + }, { + m: `yum_sum`, + v: 324789.5, + lset: labels.FromStrings("__name__", "yum_sum"), + ct: int64p(1520430003000), + }, { + m: `yum{quantile="0.95"}`, + v: 123.7, + lset: labels.FromStrings("__name__", "yum", "quantile", "0.95"), + ct: int64p(1520430003000), + }, { + m: `yum{quantile="0.99"}`, + v: 150.0, + lset: labels.FromStrings("__name__", "yum", "quantile", "0.99"), + ct: int64p(1520430003000), + }, { + m: "foobar", + help: "Summary with _created as the first line", + }, { + m: "foobar", + typ: model.MetricTypeSummary, + }, { + m: `foobar_count`, + v: 21, + lset: labels.FromStrings("__name__", "foobar_count"), + ct: int64p(1520430004000), + }, { + m: `foobar_sum`, + v: 324789.6, + lset: labels.FromStrings("__name__", "foobar_sum"), + ct: int64p(1520430004000), + }, { + m: `foobar{quantile="0.95"}`, + v: 123.8, + lset: labels.FromStrings("__name__", "foobar", "quantile", "0.95"), + ct: int64p(1520430004000), + }, { + m: `foobar{quantile="0.99"}`, + v: 150.1, + lset: labels.FromStrings("__name__", "foobar", "quantile", "0.99"), + ct: int64p(1520430004000), + }, { + m: "metric", + help: "foo\x00bar", + }, { + m: "null_byte_metric{a=\"abc\x00\"}", + v: 1, + lset: labels.FromStrings("__name__", "null_byte_metric", "a", "abc\x00"), + }, + } + + p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) + p = NewNHCBParser(p, labels.NewSymbolTable(), false) + got := testParse(t, p) + requireEntries(t, exp, got) +} + +func TestNHCBParserOMParser_MultipleHistograms(t *testing.T) { + // The input is taken originally from TestOpenMetricsParse, with additional tests for the NHCBParser. + + input := `# HELP something Histogram with _created between buckets and summary +# TYPE something histogram +something_count 18 +something_sum 324789.4 +something_bucket{le="0.0"} 1 # {id="something-test"} -2.0 +something_bucket{le="1.0"} 16 # {id="something-test"} 0.5 +something_bucket{le="+Inf"} 18 # {id="something-test"} 8 +something_count{a="b"} 9 +something_sum{a="b"} 42123.0 +something_bucket{a="b",le="0.0"} 8 # {id="something-test"} 0.0 123.321 +something_bucket{a="b",le="1.0"} 8 +something_bucket{a="b",le="+Inf"} 9 # {id="something-test"} 2e100 123.000 +# EOF +` + + exp := []parsedEntry{ + { + m: "something", + help: "Histogram with _created between buckets and summary", + }, { + m: "something", + typ: model.MetricTypeHistogram, + }, { + m: `something{}`, + shs: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 18, + Sum: 324789.4, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}}, + PositiveBuckets: []int64{1, 14, -13}, + CustomValues: []float64{0.0, 1.0}, // We do not store the +Inf boundary. 
+ }, + lset: labels.FromStrings("__name__", "something"), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "something-test"), Value: -2.0}, + {Labels: labels.FromStrings("id", "something-test"), Value: 0.5}, + {Labels: labels.FromStrings("id", "something-test"), Value: 8.0}, + }, + }, { + m: `something{a="b"}`, + shs: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 9, + Sum: 42123.0, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}, {Offset: 1, Length: 1}}, + PositiveBuckets: []int64{8, -7}, + CustomValues: []float64{0.0, 1.0}, // We do not store the +Inf boundary. + }, + lset: labels.FromStrings("__name__", "something", "a", "b"), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "something-test"), Value: 0.0, HasTs: true, Ts: 123321}, + {Labels: labels.FromStrings("id", "something-test"), Value: 2e100, HasTs: true, Ts: 123000}, + }, + }, + } + + p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) + + p = NewNHCBParser(p, labels.NewSymbolTable(), false) + got := testParse(t, p) + requireEntries(t, exp, got) +} + +// Verify the requirement tables from +// https://github.com/prometheus/prometheus/issues/13532 . +// "classic" means the option "always_scrape_classic_histograms". +// "nhcb" means the option "convert_classic_histograms_to_nhcb". +// +// Case 1. Only classic histogram is exposed. +// +// | Scrape Config | Expect classic | Expect exponential | Expect NHCB |. +// | classic=false, nhcb=false | YES | NO | NO |. +// | classic=true, nhcb=false | YES | NO | NO |. +// | classic=false, nhcb=true | NO | NO | YES |. +// | classic=true, nhcb=true | YES | NO | YES |. +// +// Case 2. Both classic and exponential histograms are exposed. +// +// | Scrape Config | Expect classic | Expect exponential | Expect NHCB |. +// | classic=false, nhcb=false | NO | YES | NO |. +// | classic=true, nhcb=false | YES | YES | NO |. +// | classic=false, nhcb=true | NO | YES | NO |. +// | classic=true, nhcb=true | YES | YES | NO |. +// +// Case 3. Only exponential histogram is exposed. +// +// | Scrape Config | Expect classic | Expect exponential | Expect NHCB |. +// | classic=false, nhcb=false | NO | YES | NO |. +// | classic=true, nhcb=false | NO | YES | NO |. +// | classic=false, nhcb=true | NO | YES | NO |. +// | classic=true, nhcb=true | NO | YES | NO |. +func TestNHCBParser_NoNHCBWhenExponential(t *testing.T) { + type requirement struct { + expectClassic bool + expectExponential bool + expectNHCB bool + } + + cases := []map[string]requirement{ + // Case 1. + { + "classic=false, nhcb=false": {expectClassic: true, expectExponential: false, expectNHCB: false}, + "classic=true, nhcb=false": {expectClassic: true, expectExponential: false, expectNHCB: false}, + "classic=false, nhcb=true": {expectClassic: false, expectExponential: false, expectNHCB: true}, + "classic=true, nhcb=true": {expectClassic: true, expectExponential: false, expectNHCB: true}, + }, + // Case 2. + { + "classic=false, nhcb=false": {expectClassic: false, expectExponential: true, expectNHCB: false}, + "classic=true, nhcb=false": {expectClassic: true, expectExponential: true, expectNHCB: false}, + "classic=false, nhcb=true": {expectClassic: false, expectExponential: true, expectNHCB: false}, + "classic=true, nhcb=true": {expectClassic: true, expectExponential: true, expectNHCB: false}, + }, + // Case 3. 
+ { + "classic=false, nhcb=false": {expectClassic: false, expectExponential: true, expectNHCB: false}, + "classic=true, nhcb=false": {expectClassic: false, expectExponential: true, expectNHCB: false}, + "classic=false, nhcb=true": {expectClassic: false, expectExponential: true, expectNHCB: false}, + "classic=true, nhcb=true": {expectClassic: false, expectExponential: true, expectNHCB: false}, + }, + } + + // Create parser from keep classic option. + type parserFactory func(bool) Parser + + type testCase struct { + name string + parser parserFactory + classic bool + nhcb bool + exp []parsedEntry + } + + type parserOptions struct { + useUTF8sep bool + hasCreatedTimeStamp bool + } + // Defines the parser name, the Parser factory and the test cases + // supported by the parser and parser options. + parsers := []func() (string, parserFactory, []int, parserOptions){ + func() (string, parserFactory, []int, parserOptions) { + factory := func(keepClassic bool) Parser { + inputBuf := createTestProtoBufHistogram(t) + return NewProtobufParser(inputBuf.Bytes(), keepClassic, labels.NewSymbolTable()) + } + return "ProtoBuf", factory, []int{1, 2, 3}, parserOptions{useUTF8sep: true, hasCreatedTimeStamp: true} + }, + func() (string, parserFactory, []int, parserOptions) { + factory := func(keepClassic bool) Parser { + input := createTestOpenMetricsHistogram() + return NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) + } + return "OpenMetrics", factory, []int{1}, parserOptions{hasCreatedTimeStamp: true} + }, + func() (string, parserFactory, []int, parserOptions) { + factory := func(keepClassic bool) Parser { + input := createTestPromHistogram() + return NewPromParser([]byte(input), labels.NewSymbolTable()) + } + return "Prometheus", factory, []int{1}, parserOptions{} + }, + } + + testCases := []testCase{} + for _, parser := range parsers { + for _, classic := range []bool{false, true} { + for _, nhcb := range []bool{false, true} { + parserName, parser, supportedCases, options := parser() + requirementName := "classic=" + strconv.FormatBool(classic) + ", nhcb=" + strconv.FormatBool(nhcb) + tc := testCase{ + name: "parser=" + parserName + ", " + requirementName, + parser: parser, + classic: classic, + nhcb: nhcb, + exp: []parsedEntry{}, + } + for _, caseNumber := range supportedCases { + caseI := cases[caseNumber-1] + req, ok := caseI[requirementName] + require.True(t, ok, "Case %d does not have requirement %s", caseNumber, requirementName) + metric := "test_histogram" + strconv.Itoa(caseNumber) + tc.exp = append(tc.exp, parsedEntry{ + m: metric, + help: "Test histogram " + strconv.Itoa(caseNumber), + }) + tc.exp = append(tc.exp, parsedEntry{ + m: metric, + typ: model.MetricTypeHistogram, + }) + + var ct *int64 + if options.hasCreatedTimeStamp { + ct = int64p(1000) + } + + var bucketForMetric func(string) string + if options.useUTF8sep { + bucketForMetric = func(s string) string { + return "_bucket\xffle\xff" + s + } + } else { + bucketForMetric = func(s string) string { + return "_bucket{le=\"" + s + "\"}" + } + } + + if req.expectExponential { + // Always expect exponential histogram first. 
+ exponentialSeries := []parsedEntry{ + { + m: metric, + shs: &histogram.Histogram{ + Schema: 3, + Count: 175, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + ZeroCount: 2, + PositiveSpans: []histogram.Span{{Offset: -161, Length: 1}, {Offset: 8, Length: 3}}, + NegativeSpans: []histogram.Span{{Offset: -162, Length: 1}, {Offset: 23, Length: 4}}, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings("__name__", metric), + t: int64p(1234568), + ct: ct, + }, + } + tc.exp = append(tc.exp, exponentialSeries...) + } + if req.expectClassic { + // Always expect classic histogram series after exponential. + classicSeries := []parsedEntry{ + { + m: metric + "_count", + v: 175, + lset: labels.FromStrings("__name__", metric+"_count"), + t: int64p(1234568), + ct: ct, + }, + { + m: metric + "_sum", + v: 0.0008280461746287094, + lset: labels.FromStrings("__name__", metric+"_sum"), + t: int64p(1234568), + ct: ct, + }, + { + m: metric + bucketForMetric("-0.0004899999999999998"), + v: 2, + lset: labels.FromStrings("__name__", metric+"_bucket", "le", "-0.0004899999999999998"), + t: int64p(1234568), + ct: ct, + }, + { + m: metric + bucketForMetric("-0.0003899999999999998"), + v: 4, + lset: labels.FromStrings("__name__", metric+"_bucket", "le", "-0.0003899999999999998"), + t: int64p(1234568), + ct: ct, + }, + { + m: metric + bucketForMetric("-0.0002899999999999998"), + v: 16, + lset: labels.FromStrings("__name__", metric+"_bucket", "le", "-0.0002899999999999998"), + t: int64p(1234568), + ct: ct, + }, + { + m: metric + bucketForMetric("+Inf"), + v: 175, + lset: labels.FromStrings("__name__", metric+"_bucket", "le", "+Inf"), + t: int64p(1234568), + ct: ct, + }, + } + tc.exp = append(tc.exp, classicSeries...) + } + if req.expectNHCB { + // Always expect NHCB series after classic. + nhcbSeries := []parsedEntry{ + { + m: metric + "{}", + shs: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 175, + Sum: 0.0008280461746287094, + PositiveSpans: []histogram.Span{{Length: 4}}, + PositiveBuckets: []int64{2, 0, 10, 147}, + CustomValues: []float64{-0.0004899999999999998, -0.0003899999999999998, -0.0002899999999999998}, + }, + lset: labels.FromStrings("__name__", metric), + t: int64p(1234568), + ct: ct, + }, + } + tc.exp = append(tc.exp, nhcbSeries...) 
+ } + } + testCases = append(testCases, tc) + } + } + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + p := tc.parser(tc.classic) + if tc.nhcb { + p = NewNHCBParser(p, labels.NewSymbolTable(), tc.classic) + } + got := testParse(t, p) + requireEntries(t, tc.exp, got) + }) + } +} + +func createTestProtoBufHistogram(t *testing.T) *bytes.Buffer { + testMetricFamilies := []string{`name: "test_histogram1" +help: "Test histogram 1" +type: HISTOGRAM +metric: < + histogram: < + created_timestamp: < + seconds: 1 + nanos: 1 + > + sample_count: 175 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count: 2 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count: 4 + upper_bound: -0.0003899999999999998 + > + bucket: < + cumulative_count: 16 + upper_bound: -0.0002899999999999998 + > + > + timestamp_ms: 1234568 +>`, `name: "test_histogram2" +help: "Test histogram 2" +type: HISTOGRAM +metric: < + histogram: < + created_timestamp: < + seconds: 1 + nanos: 1 + > + sample_count: 175 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count: 2 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count: 4 + upper_bound: -0.0003899999999999998 + > + bucket: < + cumulative_count: 16 + upper_bound: -0.0002899999999999998 + > + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count: 2 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_delta: 1 + negative_delta: 3 + negative_delta: -2 + negative_delta: -1 + negative_delta: 1 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_delta: 1 + positive_delta: 2 + positive_delta: -1 + positive_delta: -1 + > + timestamp_ms: 1234568 +>`, `name: "test_histogram3" +help: "Test histogram 3" +type: HISTOGRAM +metric: < + histogram: < + created_timestamp: < + seconds: 1 + nanos: 1 + > + sample_count: 175 + sample_sum: 0.0008280461746287094 + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count: 2 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_delta: 1 + negative_delta: 3 + negative_delta: -2 + negative_delta: -1 + negative_delta: 1 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_delta: 1 + positive_delta: 2 + positive_delta: -1 + positive_delta: -1 + > + timestamp_ms: 1234568 +> +`} + + varintBuf := make([]byte, binary.MaxVarintLen32) + buf := &bytes.Buffer{} + + for _, tmf := range testMetricFamilies { + pb := &dto.MetricFamily{} + // From text to proto message. + require.NoError(t, proto.UnmarshalText(tmf, pb)) + // From proto message to binary protobuf. + protoBuf, err := proto.Marshal(pb) + require.NoError(t, err) + + // Write first length, then binary protobuf. 
+ varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf))) + buf.Write(varintBuf[:varintLength]) + buf.Write(protoBuf) + } + + return buf +} + +func createTestOpenMetricsHistogram() string { + return `# HELP test_histogram1 Test histogram 1 +# TYPE test_histogram1 histogram +test_histogram1_count 175 1234.568 +test_histogram1_sum 0.0008280461746287094 1234.568 +test_histogram1_bucket{le="-0.0004899999999999998"} 2 1234.568 +test_histogram1_bucket{le="-0.0003899999999999998"} 4 1234.568 +test_histogram1_bucket{le="-0.0002899999999999998"} 16 1234.568 +test_histogram1_bucket{le="+Inf"} 175 1234.568 +test_histogram1_created 1 +# EOF` +} + +func createTestPromHistogram() string { + return `# HELP test_histogram1 Test histogram 1 +# TYPE test_histogram1 histogram +test_histogram1_count 175 1234568 +test_histogram1_sum 0.0008280461746287094 1234768 +test_histogram1_bucket{le="-0.0004899999999999998"} 2 1234568 +test_histogram1_bucket{le="-0.0003899999999999998"} 4 1234568 +test_histogram1_bucket{le="-0.0002899999999999998"} 16 1234568 +test_histogram1_bucket{le="+Inf"} 175 1234568` +} diff --git a/model/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go index 5f0415d3ee9..3ae9c7ddfc3 100644 --- a/model/textparse/openmetricsparse.go +++ b/model/textparse/openmetricsparse.go @@ -17,13 +17,16 @@ package textparse import ( + "bytes" "errors" "fmt" "io" "math" + "strconv" "strings" "unicode/utf8" + "github.com/cespare/xxhash/v2" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" @@ -72,15 +75,16 @@ func (l *openMetricsLexer) Error(es string) { // OpenMetrics text exposition format. // This is based on the working draft https://docs.google.com/document/u/1/d/1KwV0mAXwwbvvifBvDKH_LU1YjyXE_wxCkHNoCGq1GX0/edit type OpenMetricsParser struct { - l *openMetricsLexer - builder labels.ScratchBuilder - series []byte - text []byte - mtype model.MetricType - val float64 - ts int64 - hasTS bool - start int + l *openMetricsLexer + builder labels.ScratchBuilder + series []byte + mfNameLen int // length of metric family name to get from series. + text []byte + mtype model.MetricType + val float64 + ts int64 + hasTS bool + start int // offsets is a list of offsets into series that describe the positions // of the metric name and label names and values for this series. // p.offsets[0] is the start character of the metric name. @@ -95,7 +99,15 @@ type OpenMetricsParser struct { exemplarTs int64 hasExemplarTs bool - skipCTSeries bool + // Created timestamp parsing state. + ct int64 + ctHashSet uint64 + // ignoreExemplar instructs the parser to not overwrite exemplars (to keep them while peeking ahead). + ignoreExemplar bool + // visitedMFName is the metric family name of the last visited metric when peeking ahead + // for _created series during the execution of the CreatedTimestamp method. + visitedMFName []byte + skipCTSeries bool } type openMetricsParserOptions struct { @@ -201,7 +213,7 @@ func (p *OpenMetricsParser) Metric(l *labels.Labels) string { label := unreplace(s[a:b]) c := p.offsets[i+2] - p.start d := p.offsets[i+3] - p.start - value := unreplace(s[c:d]) + value := normalizeFloatsInLabelValues(p.mtype, label, unreplace(s[c:d])) p.builder.Add(label, value) } @@ -252,87 +264,144 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { // CreatedTimestamp returns the created timestamp for a current Metric if exists or nil. 
// NOTE(Maniktherana): Might use additional CPU/mem resources due to deep copy of parser required for peeking given 1.0 OM specification on _created series. func (p *OpenMetricsParser) CreatedTimestamp() *int64 { - if !TypeRequiresCT(p.mtype) { + if !typeRequiresCT(p.mtype) { // Not a CT supported metric type, fast path. + p.ctHashSet = 0 // Use ctHashSet as a single way of telling "empty cache" return nil } var ( - currLset labels.Labels - buf []byte - peekWithoutNameLsetHash uint64 + buf []byte + currName []byte ) - p.Metric(&currLset) - currFamilyLsetHash, buf := currLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile") - // Search for the _created line for the currFamilyLsetHash using ephemeral parser until - // we see EOF or new metric family. We have to do it as we don't know where (and if) - // that CT line is. - // TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. - peek := deepCopy(p) + if len(p.series) > 1 && p.series[0] == '{' && p.series[1] == '"' { + // special case for UTF-8 encoded metric family names. + currName = p.series[p.offsets[0]-p.start : p.mfNameLen+2] + } else { + currName = p.series[p.offsets[0]-p.start : p.mfNameLen] + } + + currHash := p.seriesHash(&buf, currName) + // Check cache, perhaps we fetched something already. + if currHash == p.ctHashSet && p.ct > 0 { + return &p.ct + } + + // Create a new lexer to reset the parser once this function is done executing. + resetLexer := &openMetricsLexer{ + b: p.l.b, + i: p.l.i, + start: p.l.start, + err: p.l.err, + state: p.l.state, + } + + p.skipCTSeries = false + + p.ignoreExemplar = true + savedStart := p.start + defer func() { + p.ignoreExemplar = false + p.start = savedStart + p.l = resetLexer + }() + for { - eType, err := peek.Next() + eType, err := p.Next() if err != nil { - // This means peek will give error too later on, so def no CT line found. + // This means p.Next() will give error too later on, so def no CT line found. // This might result in partial scrape with wrong/missing CT, but only // spec improvement would help. - // TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. + // TODO: Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. + p.resetCTParseValues() return nil } if eType != EntrySeries { // Assume we hit different family, no CT line found. + p.resetCTParseValues() return nil } - var peekedLset labels.Labels - peek.Metric(&peekedLset) - peekedName := peekedLset.Get(model.MetricNameLabel) - if !strings.HasSuffix(peekedName, "_created") { + peekedName := p.series[p.offsets[0]-p.start : p.offsets[1]-p.start] + if len(peekedName) < 8 || string(peekedName[len(peekedName)-8:]) != "_created" { // Not a CT line, search more. continue } - // We got a CT line here, but let's search if CT line is actually for our series, edge case. - peekWithoutNameLsetHash, _ = peekedLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile") - if peekWithoutNameLsetHash != currFamilyLsetHash { - // CT line for a different series, for our series no CT. + // Remove _created suffix. + peekedHash := p.seriesHash(&buf, peekedName[:len(peekedName)-8]) + if peekedHash != currHash { + // Found CT line for a different series, for our series no CT. + p.resetCTParseValues() return nil } - ct := int64(peek.val) + + // All timestamps in OpenMetrics are Unix Epoch in seconds. Convert to milliseconds. 
+ // https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#timestamps + ct := int64(p.val * 1000.0) + p.setCTParseValues(ct, currHash, currName, true) return &ct } } -// TypeRequiresCT returns true if the metric type requires a _created timestamp. -func TypeRequiresCT(t model.MetricType) bool { - switch t { - case model.MetricTypeCounter, model.MetricTypeSummary, model.MetricTypeHistogram: - return true - default: - return false +var ( + leBytes = []byte{108, 101} + quantileBytes = []byte{113, 117, 97, 110, 116, 105, 108, 101} +) + +// seriesHash generates a hash based on the metric family name and the offsets +// of label names and values from the parsed OpenMetrics data. It skips quantile +// and le labels for summaries and histograms respectively. +func (p *OpenMetricsParser) seriesHash(offsetsArr *[]byte, metricFamilyName []byte) uint64 { + // Iterate through p.offsets to find the label names and values. + for i := 2; i < len(p.offsets); i += 4 { + lStart := p.offsets[i] - p.start + lEnd := p.offsets[i+1] - p.start + label := p.series[lStart:lEnd] + // Skip quantile and le labels for summaries and histograms. + if p.mtype == model.MetricTypeSummary && bytes.Equal(label, quantileBytes) { + continue + } + if p.mtype == model.MetricTypeHistogram && bytes.Equal(label, leBytes) { + continue + } + *offsetsArr = append(*offsetsArr, p.series[lStart:lEnd]...) + vStart := p.offsets[i+2] - p.start + vEnd := p.offsets[i+3] - p.start + *offsetsArr = append(*offsetsArr, p.series[vStart:vEnd]...) } + + *offsetsArr = append(*offsetsArr, metricFamilyName...) + hashedOffsets := xxhash.Sum64(*offsetsArr) + + // Reset the offsets array for later reuse. + *offsetsArr = (*offsetsArr)[:0] + return hashedOffsets } -// deepCopy creates a copy of a parser without re-using the slices' original memory addresses. -func deepCopy(p *OpenMetricsParser) OpenMetricsParser { - newB := make([]byte, len(p.l.b)) - copy(newB, p.l.b) +// setCTParseValues sets the parser to the state after CreatedTimestamp method was called and CT was found. +// This is useful to prevent re-parsing the same series again and early return the CT value. +func (p *OpenMetricsParser) setCTParseValues(ct int64, ctHashSet uint64, mfName []byte, skipCTSeries bool) { + p.ct = ct + p.ctHashSet = ctHashSet + p.visitedMFName = mfName + p.skipCTSeries = skipCTSeries // Do we need to set it? +} - newLexer := &openMetricsLexer{ - b: newB, - i: p.l.i, - start: p.l.start, - err: p.l.err, - state: p.l.state, - } +// resetCtParseValues resets the parser to the state before CreatedTimestamp method was called. +func (p *OpenMetricsParser) resetCTParseValues() { + p.ctHashSet = 0 + p.skipCTSeries = true +} - newParser := OpenMetricsParser{ - l: newLexer, - builder: p.builder, - mtype: p.mtype, - val: p.val, - skipCTSeries: false, +// typeRequiresCT returns true if the metric type requires a _created timestamp. +func typeRequiresCT(t model.MetricType) bool { + switch t { + case model.MetricTypeCounter, model.MetricTypeSummary, model.MetricTypeHistogram: + return true + default: + return false } - return newParser } // nextToken returns the next token from the openMetricsLexer. 
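The new CreatedTimestamp path above replaces the deep-copy peek with an in-place peek plus a small cache keyed by a hash of the series identity: label names and values (minus `le` for histograms and `quantile` for summaries) concatenated with the metric family name and hashed with xxhash. The sketch below is a minimal, hypothetical illustration of that keying idea only (`ctCacheKey` is an invented name and not part of the parser); it shows why a `_created` line and its parent series resolve to the same cache key.

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

// ctCacheKey concatenates the identifying label names and values of a series,
// skipping "le" and "quantile" (which vary between buckets/quantiles of the
// same family), appends the metric family name, and hashes the result.
func ctCacheKey(familyName string, labelPairs [][2]string) uint64 {
	buf := make([]byte, 0, 64)
	for _, lp := range labelPairs {
		if lp[0] == "le" || lp[0] == "quantile" {
			continue
		}
		buf = append(buf, lp[0]...)
		buf = append(buf, lp[1]...)
	}
	buf = append(buf, familyName...)
	return xxhash.Sum64(buf)
}

func main() {
	// foo_total{a="b"} and its foo_created{a="b"} line map to the same key,
	// so a cached created timestamp can be reused without re-peeking.
	total := ctCacheKey("foo", [][2]string{{"a", "b"}})
	created := ctCacheKey("foo", [][2]string{{"a", "b"}})
	fmt.Println(total == created) // true
}
```

Skipping `le` and `quantile` in the key is what lets every bucket or quantile series of the same family share a single cached `_created` value.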
@@ -356,10 +425,12 @@ func (p *OpenMetricsParser) Next() (Entry, error) { p.start = p.l.i p.offsets = p.offsets[:0] - p.eOffsets = p.eOffsets[:0] - p.exemplar = p.exemplar[:0] - p.exemplarVal = 0 - p.hasExemplarTs = false + if !p.ignoreExemplar { + p.eOffsets = p.eOffsets[:0] + p.exemplar = p.exemplar[:0] + p.exemplarVal = 0 + p.hasExemplarTs = false + } switch t := p.nextToken(); t { case tEOFWord: @@ -378,6 +449,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) { mStart++ mEnd-- } + p.mfNameLen = mEnd - mStart p.offsets = append(p.offsets, mStart, mEnd) default: return EntryInvalid, p.parseError("expected metric name after "+t.String(), t2) @@ -483,6 +555,16 @@ func (p *OpenMetricsParser) Next() (Entry, error) { func (p *OpenMetricsParser) parseComment() error { var err error + + if p.ignoreExemplar { + for t := p.nextToken(); t != tLinebreak; t = p.nextToken() { + if t == tEOF { + return errors.New("data does not end with # EOF") + } + } + return nil + } + // Parse the labels. p.eOffsets, err = p.parseLVals(p.eOffsets, true) if err != nil { @@ -591,10 +673,9 @@ func (p *OpenMetricsParser) parseLVals(offsets []int, isExemplar bool) ([]int, e // isCreatedSeries returns true if the current series is a _created series. func (p *OpenMetricsParser) isCreatedSeries() bool { - var newLbs labels.Labels - p.Metric(&newLbs) - name := newLbs.Get(model.MetricNameLabel) - if TypeRequiresCT(p.mtype) && strings.HasSuffix(name, "_created") { + metricName := p.series[p.offsets[0]-p.start : p.offsets[1]-p.start] + // check length so the metric is longer than len("_created") + if typeRequiresCT(p.mtype) && len(metricName) >= 8 && string(metricName[len(metricName)-8:]) == "_created" { return true } return false @@ -663,3 +744,15 @@ func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error } return val, nil } + +// normalizeFloatsInLabelValues ensures that values of the "le" labels of classic histograms and "quantile" labels +// of summaries follow OpenMetrics formatting rules. 
+func normalizeFloatsInLabelValues(t model.MetricType, l, v string) string { + if (t == model.MetricTypeSummary && l == model.QuantileLabel) || (t == model.MetricTypeHistogram && l == model.BucketLabel) { + f, err := strconv.ParseFloat(v, 64) + if err == nil { + return formatOpenMetricsFloat(f) + } + } + return v +} diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go index cadaabc99f7..9c3c679ab53 100644 --- a/model/textparse/openmetricsparse_test.go +++ b/model/textparse/openmetricsparse_test.go @@ -14,7 +14,7 @@ package textparse import ( - "errors" + "fmt" "io" "testing" @@ -69,32 +69,57 @@ testmetric{label="\"bar\""} 1 # HELP foo Counter with and without labels to certify CT is parsed for both cases # TYPE foo counter foo_total 17.0 1520879607.789 # {id="counter-test"} 5 -foo_created 1000 +foo_created 1520872607.123 foo_total{a="b"} 17.0 1520879607.789 # {id="counter-test"} 5 -foo_created{a="b"} 1000 +foo_created{a="b"} 1520872607.123 +foo_total{le="c"} 21.0 +foo_created{le="c"} 1520872621.123 +foo_total{le="1"} 10.0 # HELP bar Summary with CT at the end, making sure we find CT even if it's multiple lines a far # TYPE bar summary bar_count 17.0 bar_sum 324789.3 bar{quantile="0.95"} 123.7 bar{quantile="0.99"} 150.0 -bar_created 1520430000 +bar_created 1520872608.124 # HELP baz Histogram with the same objective as above's summary # TYPE baz histogram baz_bucket{le="0.0"} 0 baz_bucket{le="+Inf"} 17 baz_count 17 baz_sum 324789.3 -baz_created 1520430000 +baz_created 1520872609.125 # HELP fizz_created Gauge which shouldn't be parsed as CT # TYPE fizz_created gauge -fizz_created 17.0` +fizz_created 17.0 +# HELP something Histogram with _created between buckets and summary +# TYPE something histogram +something_count 18 +something_sum 324789.4 +something_created 1520430001 +something_bucket{le="0.0"} 1 +something_bucket{le="1"} 2 +something_bucket{le="+Inf"} 18 +# HELP yum Summary with _created between sum and quantiles +# TYPE yum summary +yum_count 20 +yum_sum 324789.5 +yum_created 1520430003 +yum{quantile="0.95"} 123.7 +yum{quantile="0.99"} 150.0 +# HELP foobar Summary with _created as the first line +# TYPE foobar summary +foobar_count 21 +foobar_created 1520430004 +foobar_sum 324789.6 +foobar{quantile="0.95"} 123.8 +foobar{quantile="0.99"} 150.1` input += "\n# HELP metric foo\x00bar" input += "\nnull_byte_metric{a=\"abc\x00\"} 1" input += "\n# EOF\n" - exp := []expectedParse{ + exp := []parsedEntry{ { m: "go_gc_duration_seconds", help: "A summary of the GC invocation durations.", @@ -107,7 +132,7 @@ fizz_created 17.0` }, { m: `go_gc_duration_seconds{quantile="0"}`, v: 4.9351e-05, - lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0"), + lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.0"), }, { m: `go_gc_duration_seconds{quantile="0.25"}`, v: 7.424100000000001e-05, @@ -169,12 +194,16 @@ fizz_created 17.0` m: `hhh_bucket{le="+Inf"}`, v: 1, lset: labels.FromStrings("__name__", "hhh_bucket", "le", "+Inf"), - e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "histogram-bucket-test"), Value: 4}, + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "histogram-bucket-test"), Value: 4}, + }, }, { m: `hhh_count`, v: 1, lset: labels.FromStrings("__name__", "hhh_count"), - e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "histogram-count-test"), Value: 4}, + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "histogram-count-test"), Value: 4}, + }, }, { m: "ggh", typ: 
model.MetricTypeGaugeHistogram, @@ -182,12 +211,16 @@ fizz_created 17.0` m: `ggh_bucket{le="+Inf"}`, v: 1, lset: labels.FromStrings("__name__", "ggh_bucket", "le", "+Inf"), - e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "gaugehistogram-bucket-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}, + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "gaugehistogram-bucket-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}, + }, }, { m: `ggh_count`, v: 1, lset: labels.FromStrings("__name__", "ggh_count"), - e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "gaugehistogram-count-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}, + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "gaugehistogram-count-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}, + }, }, { m: "smr_seconds", typ: model.MetricTypeSummary, @@ -195,12 +228,16 @@ fizz_created 17.0` m: `smr_seconds_count`, v: 2, lset: labels.FromStrings("__name__", "smr_seconds_count"), - e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "summary-count-test"), Value: 1, HasTs: true, Ts: 123321}, + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "summary-count-test"), Value: 1, HasTs: true, Ts: 123321}, + }, }, { m: `smr_seconds_sum`, v: 42, lset: labels.FromStrings("__name__", "smr_seconds_sum"), - e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "summary-sum-test"), Value: 1, HasTs: true, Ts: 123321}, + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "summary-sum-test"), Value: 1, HasTs: true, Ts: 123321}, + }, }, { m: "ii", typ: model.MetricTypeInfo, @@ -249,15 +286,28 @@ fizz_created 17.0` v: 17, lset: labels.FromStrings("__name__", "foo_total"), t: int64p(1520879607789), - e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5}, - ct: int64p(1000), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "counter-test"), Value: 5}, + }, + ct: int64p(1520872607123), }, { m: `foo_total{a="b"}`, v: 17.0, lset: labels.FromStrings("__name__", "foo_total", "a", "b"), t: int64p(1520879607789), - e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5}, - ct: int64p(1000), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "counter-test"), Value: 5}, + }, + ct: int64p(1520872607123), + }, { + m: `foo_total{le="c"}`, + v: 21.0, + lset: labels.FromStrings("__name__", "foo_total", "le", "c"), + ct: int64p(1520872621123), + }, { + m: `foo_total{le="1"}`, + v: 10.0, + lset: labels.FromStrings("__name__", "foo_total", "le", "1"), }, { m: "bar", help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far", @@ -268,22 +318,22 @@ fizz_created 17.0` m: "bar_count", v: 17.0, lset: labels.FromStrings("__name__", "bar_count"), - ct: int64p(1520430000), + ct: int64p(1520872608124), }, { m: "bar_sum", v: 324789.3, lset: labels.FromStrings("__name__", "bar_sum"), - ct: int64p(1520430000), + ct: int64p(1520872608124), }, { m: `bar{quantile="0.95"}`, v: 123.7, lset: labels.FromStrings("__name__", "bar", "quantile", "0.95"), - ct: int64p(1520430000), + ct: int64p(1520872608124), }, { m: `bar{quantile="0.99"}`, v: 150.0, lset: labels.FromStrings("__name__", "bar", "quantile", "0.99"), - ct: int64p(1520430000), + ct: int64p(1520872608124), }, { m: "baz", help: "Histogram with the same objective as above's summary", @@ -294,22 +344,22 @@ fizz_created 17.0` m: `baz_bucket{le="0.0"}`, v: 0, lset: labels.FromStrings("__name__", "baz_bucket", "le", "0.0"), - ct: int64p(1520430000), + ct: 
int64p(1520872609125), }, { m: `baz_bucket{le="+Inf"}`, v: 17, lset: labels.FromStrings("__name__", "baz_bucket", "le", "+Inf"), - ct: int64p(1520430000), + ct: int64p(1520872609125), }, { m: `baz_count`, v: 17, lset: labels.FromStrings("__name__", "baz_count"), - ct: int64p(1520430000), + ct: int64p(1520872609125), }, { m: `baz_sum`, v: 324789.3, lset: labels.FromStrings("__name__", "baz_sum"), - ct: int64p(1520430000), + ct: int64p(1520872609125), }, { m: "fizz_created", help: "Gauge which shouldn't be parsed as CT", @@ -320,6 +370,89 @@ fizz_created 17.0` m: `fizz_created`, v: 17, lset: labels.FromStrings("__name__", "fizz_created"), + }, { + m: "something", + help: "Histogram with _created between buckets and summary", + }, { + m: "something", + typ: model.MetricTypeHistogram, + }, { + m: `something_count`, + v: 18, + lset: labels.FromStrings("__name__", "something_count"), + ct: int64p(1520430001000), + }, { + m: `something_sum`, + v: 324789.4, + lset: labels.FromStrings("__name__", "something_sum"), + ct: int64p(1520430001000), + }, { + m: `something_bucket{le="0.0"}`, + v: 1, + lset: labels.FromStrings("__name__", "something_bucket", "le", "0.0"), + ct: int64p(1520430001000), + }, { + m: `something_bucket{le="1"}`, + v: 2, + lset: labels.FromStrings("__name__", "something_bucket", "le", "1.0"), + ct: int64p(1520430001000), + }, { + m: `something_bucket{le="+Inf"}`, + v: 18, + lset: labels.FromStrings("__name__", "something_bucket", "le", "+Inf"), + ct: int64p(1520430001000), + }, { + m: "yum", + help: "Summary with _created between sum and quantiles", + }, { + m: "yum", + typ: model.MetricTypeSummary, + }, { + m: `yum_count`, + v: 20, + lset: labels.FromStrings("__name__", "yum_count"), + ct: int64p(1520430003000), + }, { + m: `yum_sum`, + v: 324789.5, + lset: labels.FromStrings("__name__", "yum_sum"), + ct: int64p(1520430003000), + }, { + m: `yum{quantile="0.95"}`, + v: 123.7, + lset: labels.FromStrings("__name__", "yum", "quantile", "0.95"), + ct: int64p(1520430003000), + }, { + m: `yum{quantile="0.99"}`, + v: 150.0, + lset: labels.FromStrings("__name__", "yum", "quantile", "0.99"), + ct: int64p(1520430003000), + }, { + m: "foobar", + help: "Summary with _created as the first line", + }, { + m: "foobar", + typ: model.MetricTypeSummary, + }, { + m: `foobar_count`, + v: 21, + lset: labels.FromStrings("__name__", "foobar_count"), + ct: int64p(1520430004000), + }, { + m: `foobar_sum`, + v: 324789.6, + lset: labels.FromStrings("__name__", "foobar_sum"), + ct: int64p(1520430004000), + }, { + m: `foobar{quantile="0.95"}`, + v: 123.8, + lset: labels.FromStrings("__name__", "foobar", "quantile", "0.95"), + ct: int64p(1520430004000), + }, { + m: `foobar{quantile="0.99"}`, + v: 150.1, + lset: labels.FromStrings("__name__", "foobar", "quantile", "0.99"), + ct: int64p(1520430004000), }, { m: "metric", help: "foo\x00bar", @@ -331,7 +464,8 @@ fizz_created 17.0` } p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) - checkParseResultsWithCT(t, p, exp, true) + got := testParse(t, p) + requireEntries(t, exp, got) } func TestUTF8OpenMetricsParse(t *testing.T) { @@ -346,7 +480,7 @@ func TestUTF8OpenMetricsParse(t *testing.T) { # UNIT "go.gc_duration_seconds" seconds {"go.gc_duration_seconds",quantile="0"} 4.9351e-05 {"go.gc_duration_seconds",quantile="0.25"} 7.424100000000001e-05 -{"go.gc_duration_seconds_created"} 12313 +{"go.gc_duration_seconds_created"} 1520872607.123 {"go.gc_duration_seconds",quantile="0.5",a="b"} 8.3835e-05 
{"http.status",q="0.9",a="b"} 8.3835e-05 {"http.status",q="0.9",a="b"} 8.3835e-05 @@ -356,7 +490,7 @@ func TestUTF8OpenMetricsParse(t *testing.T) { input += "\n# EOF\n" - exp := []expectedParse{ + exp := []parsedEntry{ { m: "go.gc_duration_seconds", help: "A summary of the GC invocation durations.", @@ -369,13 +503,13 @@ func TestUTF8OpenMetricsParse(t *testing.T) { }, { m: `{"go.gc_duration_seconds",quantile="0"}`, v: 4.9351e-05, - lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0"), - ct: int64p(12313), + lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.0"), + ct: int64p(1520872607123), }, { m: `{"go.gc_duration_seconds",quantile="0.25"}`, v: 7.424100000000001e-05, lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.25"), - ct: int64p(12313), + ct: int64p(1520872607123), }, { m: `{"go.gc_duration_seconds",quantile="0.5",a="b"}`, v: 8.3835e-05, @@ -405,7 +539,8 @@ choices}`, "strange©™\n'quoted' \"name\"", "6"), } p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) - checkParseResultsWithCT(t, p, exp, true) + got := testParse(t, p) + requireEntries(t, exp, got) } func TestOpenMetricsParseErrors(t *testing.T) { @@ -699,7 +834,7 @@ func TestOpenMetricsParseErrors(t *testing.T) { } for i, c := range cases { - p := NewOpenMetricsParser([]byte(c.input), labels.NewSymbolTable()) + p := NewOpenMetricsParser([]byte(c.input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) var err error for err == nil { _, err = p.Next() @@ -764,231 +899,121 @@ func TestOMNullByteHandling(t *testing.T) { } for i, c := range cases { - p := NewOpenMetricsParser([]byte(c.input), labels.NewSymbolTable()) + p := NewOpenMetricsParser([]byte(c.input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) var err error for err == nil { _, err = p.Next() } if c.err == "" { - require.Equal(t, io.EOF, err, "test %d", i) + require.ErrorIs(t, err, io.EOF, "test %d", i) continue } - require.Equal(t, c.err, err.Error(), "test %d", i) + require.EqualError(t, err, c.err, "test %d", i) } } -// While not desirable, there are cases were CT fails to parse and -// these tests show them. +// TestCTParseFailures tests known failure edge cases, we know does not work due +// current OM spec limitations or clients with broken OM format. // TODO(maniktherana): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. 
func TestCTParseFailures(t *testing.T) { - input := `# HELP something Histogram with _created between buckets and summary -# TYPE something histogram -something_count 17 -something_sum 324789.3 -something_created 1520430001 -something_bucket{le="0.0"} 0 -something_bucket{le="+Inf"} 17 -# HELP thing Histogram with _created as first line + for _, tcase := range []struct { + name string + input string + expected []parsedEntry + }{ + { + name: "_created line is a first one", + input: `# HELP thing histogram with _created as first line # TYPE thing histogram -thing_created 1520430002 +thing_created 1520872607.123 thing_count 17 thing_sum 324789.3 thing_bucket{le="0.0"} 0 thing_bucket{le="+Inf"} 17 -# HELP yum Summary with _created between sum and quantiles -# TYPE yum summary -yum_count 17.0 -yum_sum 324789.3 -yum_created 1520430003 -yum{quantile="0.95"} 123.7 -yum{quantile="0.99"} 150.0 -# HELP foobar Summary with _created as the first line -# TYPE foobar summary -foobar_created 1520430004 -foobar_count 17.0 -foobar_sum 324789.3 -foobar{quantile="0.95"} 123.7 -foobar{quantile="0.99"} 150.0` - - input += "\n# EOF\n" - - int64p := func(x int64) *int64 { return &x } - - type expectCT struct { - m string - ct *int64 - typ model.MetricType - help string - isErr bool - } - - exp := []expectCT{ - { - m: "something", - help: "Histogram with _created between buckets and summary", - isErr: false, - }, { - m: "something", - typ: model.MetricTypeHistogram, - isErr: false, - }, { - m: `something_count`, - ct: int64p(1520430001), - isErr: false, - }, { - m: `something_sum`, - ct: int64p(1520430001), - isErr: false, - }, { - m: `something_bucket{le="0.0"}`, - ct: int64p(1520430001), - isErr: true, - }, { - m: `something_bucket{le="+Inf"}`, - ct: int64p(1520430001), - isErr: true, - }, { - m: "thing", - help: "Histogram with _created as first line", - isErr: false, - }, { - m: "thing", - typ: model.MetricTypeHistogram, - isErr: false, - }, { - m: `thing_count`, - ct: int64p(1520430002), - isErr: true, - }, { - m: `thing_sum`, - ct: int64p(1520430002), - isErr: true, - }, { - m: `thing_bucket{le="0.0"}`, - ct: int64p(1520430002), - isErr: true, - }, { - m: `thing_bucket{le="+Inf"}`, - ct: int64p(1520430002), - isErr: true, - }, { - m: "yum", - help: "Summary with _created between summary and quantiles", - isErr: false, - }, { - m: "yum", - typ: model.MetricTypeSummary, - isErr: false, - }, { - m: "yum_count", - ct: int64p(1520430003), - isErr: false, - }, { - m: "yum_sum", - ct: int64p(1520430003), - isErr: false, - }, { - m: `yum{quantile="0.95"}`, - ct: int64p(1520430003), - isErr: true, - }, { - m: `yum{quantile="0.99"}`, - ct: int64p(1520430003), - isErr: true, - }, { - m: "foobar", - help: "Summary with _created as the first line", - isErr: false, - }, { - m: "foobar", - typ: model.MetricTypeSummary, - isErr: false, - }, { - m: "foobar_count", - ct: int64p(1520430004), - isErr: true, - }, { - m: "foobar_sum", - ct: int64p(1520430004), - isErr: true, - }, { - m: `foobar{quantile="0.95"}`, - ct: int64p(1520430004), - isErr: true, - }, { - m: `foobar{quantile="0.99"}`, - ct: int64p(1520430004), - isErr: true, - }, - } - - p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) - i := 0 - - var res labels.Labels - for { - et, err := p.Next() - if errors.Is(err, io.EOF) { - break - } - require.NoError(t, err) - - switch et { - case EntrySeries: - p.Metric(&res) - - if ct := p.CreatedTimestamp(); exp[i].isErr { - require.Nil(t, ct) - } else { - require.Equal(t, 
*exp[i].ct, *ct) - } - default: - i++ - continue - } - i++ +# HELP thing_c counter with _created as first line +# TYPE thing_c counter +thing_c_created 1520872607.123 +thing_c_total 14123.232 +# EOF +`, + expected: []parsedEntry{ + { + m: "thing", + help: "histogram with _created as first line", + }, + { + m: "thing", + typ: model.MetricTypeHistogram, + }, + { + m: `thing_count`, + ct: nil, // Should be int64p(1520872607123). + }, + { + m: `thing_sum`, + ct: nil, // Should be int64p(1520872607123). + }, + { + m: `thing_bucket{le="0.0"}`, + ct: nil, // Should be int64p(1520872607123). + }, + { + m: `thing_bucket{le="+Inf"}`, + ct: nil, // Should be int64p(1520872607123), + }, + { + m: "thing_c", + help: "counter with _created as first line", + }, + { + m: "thing_c", + typ: model.MetricTypeCounter, + }, + { + m: `thing_c_total`, + ct: nil, // Should be int64p(1520872607123). + }, + }, + }, + { + // TODO(bwplotka): Kind of correct bevaviour? If yes, let's move to the OK tests above. + name: "maybe counter with no meta", + input: `foo_total 17.0 +foo_created 1520872607.123 +foo_total{a="b"} 17.0 +foo_created{a="b"} 1520872608.123 +# EOF +`, + expected: []parsedEntry{ + { + m: `foo_total`, + }, + { + m: `foo_created`, + }, + { + m: `foo_total{a="b"}`, + }, + { + m: `foo_created{a="b"}`, + }, + }, + }, + } { + t.Run(fmt.Sprintf("case=%v", tcase.name), func(t *testing.T) { + p := NewOpenMetricsParser([]byte(tcase.input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) + got := testParse(t, p) + resetValAndLset(got) // Keep this test focused on metric, basic entries and CT only. + requireEntries(t, tcase.expected, got) + }) } } -func TestDeepCopy(t *testing.T) { - input := []byte(`# HELP go_goroutines A gauge goroutines. -# TYPE go_goroutines gauge -go_goroutines 33 123.123 -# TYPE go_gc_duration_seconds summary -go_gc_duration_seconds -go_gc_duration_seconds_created`) - - st := labels.NewSymbolTable() - parser := NewOpenMetricsParser(input, st, WithOMParserCTSeriesSkipped()).(*OpenMetricsParser) - - // Modify the original parser state - _, err := parser.Next() - require.NoError(t, err) - require.Equal(t, "go_goroutines", string(parser.l.b[parser.offsets[0]:parser.offsets[1]])) - require.True(t, parser.skipCTSeries) - - // Create a deep copy of the parser - copyParser := deepCopy(parser) - etype, err := copyParser.Next() - require.NoError(t, err) - require.Equal(t, EntryType, etype) - require.True(t, parser.skipCTSeries) - require.False(t, copyParser.skipCTSeries) - - // Modify the original parser further - parser.Next() - parser.Next() - parser.Next() - require.Equal(t, "go_gc_duration_seconds", string(parser.l.b[parser.offsets[0]:parser.offsets[1]])) - require.Equal(t, "summary", string(parser.mtype)) - require.False(t, copyParser.skipCTSeries) - require.True(t, parser.skipCTSeries) - - // Ensure the copy remains unchanged - copyParser.Next() - copyParser.Next() - require.Equal(t, "go_gc_duration_seconds", string(copyParser.l.b[copyParser.offsets[0]:copyParser.offsets[1]])) - require.False(t, copyParser.skipCTSeries) +func resetValAndLset(e []parsedEntry) { + for i := range e { + e[i].v = 0 + e[i].lset = labels.EmptyLabels() + } } diff --git a/model/textparse/promparse.go b/model/textparse/promparse.go index a611f3aea76..0ab932c665b 100644 --- a/model/textparse/promparse.go +++ b/model/textparse/promparse.go @@ -239,7 +239,8 @@ func (p *PromParser) Metric(l *labels.Labels) string { label := unreplace(s[a:b]) c := p.offsets[i+2] - p.start d := p.offsets[i+3] - p.start - value := 
unreplace(s[c:d]) + value := normalizeFloatsInLabelValues(p.mtype, label, unreplace(s[c:d])) + p.builder.Add(label, value) } @@ -502,7 +503,7 @@ func unreplace(s string) string { } func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) + return unsafe.String(unsafe.SliceData(b), len(b)) } func parseFloat(s string) (float64, error) { diff --git a/model/textparse/promparse_test.go b/model/textparse/promparse_test.go index 7971d23b7e0..e8cf66f5397 100644 --- a/model/textparse/promparse_test.go +++ b/model/textparse/promparse_test.go @@ -14,37 +14,15 @@ package textparse import ( - "bytes" - "errors" "io" - "os" - "strings" "testing" - "github.com/klauspost/compress/gzip" - "github.com/stretchr/testify/require" - - "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/util/testutil" ) -type expectedParse struct { - lset labels.Labels - m string - t *int64 - v float64 - typ model.MetricType - help string - unit string - comment string - e *exemplar.Exemplar - ct *int64 -} - func TestPromParse(t *testing.T) { input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations. # TYPE go_gc_duration_seconds summary @@ -53,6 +31,13 @@ go_gc_duration_seconds{quantile="0.25",} 7.424100000000001e-05 go_gc_duration_seconds{quantile="0.5",a="b"} 8.3835e-05 go_gc_duration_seconds{quantile="0.8", a="b"} 8.3835e-05 go_gc_duration_seconds{ quantile="0.9", a="b"} 8.3835e-05 +# HELP prometheus_http_request_duration_seconds Histogram of latencies for HTTP requests. +# TYPE prometheus_http_request_duration_seconds histogram +prometheus_http_request_duration_seconds_bucket{handler="/",le="1"} 423 +prometheus_http_request_duration_seconds_bucket{handler="/",le="2"} 1423 +prometheus_http_request_duration_seconds_bucket{handler="/",le="+Inf"} 1423 +prometheus_http_request_duration_seconds_sum{handler="/"} 2000 +prometheus_http_request_duration_seconds_count{handler="/"} 1423 # Hrandom comment starting with prefix of HELP # wind_speed{A="2",c="3"} 12345 @@ -72,13 +57,12 @@ some:aggregate:rate5m{a_b="c"} 1 go_goroutines 33 123123 _metric_starting_with_underscore 1 testmetric{_label_starting_with_underscore="foo"} 1 -testmetric{label="\"bar\""} 1` +testmetric{label="\"bar\""} 1 +testmetric{le="10"} 1` input += "\n# HELP metric foo\x00bar" input += "\nnull_byte_metric{a=\"abc\x00\"} 1" - int64p := func(x int64) *int64 { return &x } - - exp := []expectedParse{ + exp := []parsedEntry{ { m: "go_gc_duration_seconds", help: "A summary of the GC invocation durations.", @@ -88,7 +72,7 @@ testmetric{label="\"bar\""} 1` }, { m: `go_gc_duration_seconds{quantile="0"}`, v: 4.9351e-05, - lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0"), + lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.0"), }, { m: `go_gc_duration_seconds{quantile="0.25",}`, v: 7.424100000000001e-05, @@ -105,6 +89,32 @@ testmetric{label="\"bar\""} 1` m: `go_gc_duration_seconds{ quantile="0.9", a="b"}`, v: 8.3835e-05, lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.9", "a", "b"), + }, { + m: "prometheus_http_request_duration_seconds", + help: "Histogram of latencies for HTTP requests.", + }, { + m: "prometheus_http_request_duration_seconds", + typ: model.MetricTypeHistogram, + }, { + m: 
`prometheus_http_request_duration_seconds_bucket{handler="/",le="1"}`, + v: 423, + lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "1.0"), + }, { + m: `prometheus_http_request_duration_seconds_bucket{handler="/",le="2"}`, + v: 1423, + lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "2.0"), + }, { + m: `prometheus_http_request_duration_seconds_bucket{handler="/",le="+Inf"}`, + v: 1423, + lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "+Inf"), + }, { + m: `prometheus_http_request_duration_seconds_sum{handler="/"}`, + v: 2000, + lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_sum", "handler", "/"), + }, { + m: `prometheus_http_request_duration_seconds_count{handler="/"}`, + v: 1423, + lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_count", "handler", "/"), }, { comment: "# Hrandom comment starting with prefix of HELP", }, { @@ -140,7 +150,7 @@ testmetric{label="\"bar\""} 1` v: 8.3835e-05, lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"), }, { - // NOTE: Unlike OpenMetrics, Promparse allows spaces between label terms. This appears to be unintended and should probably be fixed. + // NOTE: Unlike OpenMetrics, PromParser allows spaces between label terms. This appears to be unintended and should probably be fixed. m: `go_gc_duration_seconds { quantile = "2.0" a = "b" }`, v: 8.3835e-05, lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "2.0", "a", "b"), @@ -175,6 +185,10 @@ testmetric{label="\"bar\""} 1` m: "testmetric{label=\"\\\"bar\\\"\"}", v: 1, lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`), + }, { + m: `testmetric{le="10"}`, + v: 1, + lset: labels.FromStrings("__name__", "testmetric", "le", "10"), }, { m: "metric", help: "foo\x00bar", @@ -186,80 +200,8 @@ testmetric{label="\"bar\""} 1` } p := NewPromParser([]byte(input), labels.NewSymbolTable()) - checkParseResults(t, p, exp) -} - -func checkParseResults(t *testing.T, p Parser, exp []expectedParse) { - checkParseResultsWithCT(t, p, exp, false) -} - -func checkParseResultsWithCT(t *testing.T, p Parser, exp []expectedParse, ctLinesRemoved bool) { - i := 0 - - var res labels.Labels - - for { - et, err := p.Next() - if errors.Is(err, io.EOF) { - break - } - require.NoError(t, err) - - switch et { - case EntrySeries: - m, ts, v := p.Series() - - p.Metric(&res) - - if ctLinesRemoved { - // Are CT series skipped? 
- _, typ := p.Type() - if TypeRequiresCT(typ) && strings.HasSuffix(res.Get(labels.MetricName), "_created") { - t.Fatalf("we exped created lines skipped") - } - } - - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].t, ts) - require.Equal(t, exp[i].v, v) - testutil.RequireEqual(t, exp[i].lset, res) - - var e exemplar.Exemplar - found := p.Exemplar(&e) - if exp[i].e == nil { - require.False(t, found) - } else { - require.True(t, found) - testutil.RequireEqual(t, *exp[i].e, e) - } - if ct := p.CreatedTimestamp(); ct != nil { - require.Equal(t, *exp[i].ct, *ct) - } else { - require.Nil(t, exp[i].ct) - } - - case EntryType: - m, typ := p.Type() - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].typ, typ) - - case EntryHelp: - m, h := p.Help() - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].help, string(h)) - - case EntryUnit: - m, u := p.Unit() - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].unit, string(u)) - - case EntryComment: - require.Equal(t, exp[i].comment, string(p.Comment())) - } - - i++ - } - require.Len(t, exp, i) + got := testParse(t, p) + requireEntries(t, exp, got) } func TestUTF8PromParse(t *testing.T) { @@ -283,7 +225,7 @@ func TestUTF8PromParse(t *testing.T) { {"go.gc_duration_seconds_count"} 99 {"Heizölrückstoßabdämpfung 10€ metric with \"interesting\" {character\nchoices}","strange©™\n'quoted' \"name\""="6"} 10.0` - exp := []expectedParse{ + exp := []parsedEntry{ { m: "go.gc_duration_seconds", help: "A summary of the GC invocation durations.", @@ -293,7 +235,7 @@ func TestUTF8PromParse(t *testing.T) { }, { m: `{"go.gc_duration_seconds",quantile="0"}`, v: 4.9351e-05, - lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0"), + lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.0"), }, { m: `{"go.gc_duration_seconds",quantile="0.25",}`, v: 7.424100000000001e-05, @@ -339,7 +281,8 @@ choices}`, "strange©™\n'quoted' \"name\"", "6"), } p := NewPromParser([]byte(input), labels.NewSymbolTable()) - checkParseResults(t, p, exp) + got := testParse(t, p) + requireEntries(t, exp, got) } func TestPromParseErrors(t *testing.T) { @@ -423,8 +366,7 @@ func TestPromParseErrors(t *testing.T) { for err == nil { _, err = p.Next() } - require.Error(t, err) - require.Equal(t, c.err, err.Error(), "test %d", i) + require.EqualError(t, err, c.err, "test %d", i) } } @@ -483,194 +425,6 @@ func TestPromNullByteHandling(t *testing.T) { continue } - require.Error(t, err) - require.Equal(t, c.err, err.Error(), "test %d", i) - } -} - -const ( - promtestdataSampleCount = 410 -) - -func BenchmarkParse(b *testing.B) { - for parserName, parser := range map[string]func([]byte, *labels.SymbolTable) Parser{ - "prometheus": NewPromParser, - "openmetrics": func(b []byte, st *labels.SymbolTable) Parser { - return NewOpenMetricsParser(b, st) - }, - } { - for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} { - f, err := os.Open(fn) - require.NoError(b, err) - defer f.Close() - - buf, err := io.ReadAll(f) - require.NoError(b, err) - - b.Run(parserName+"/no-decode-metric/"+fn, func(b *testing.B) { - total := 0 - - b.SetBytes(int64(len(buf) / promtestdataSampleCount)) - b.ReportAllocs() - b.ResetTimer() - - st := labels.NewSymbolTable() - for i := 0; i < b.N; i += promtestdataSampleCount { - p := parser(buf, st) - - Outer: - for i < b.N { - t, err := p.Next() - switch t { - case EntryInvalid: - if errors.Is(err, io.EOF) { - break Outer - } - b.Fatal(err) - case EntrySeries: - m, _, _ 
:= p.Series() - total += len(m) - i++ - } - } - } - _ = total - }) - b.Run(parserName+"/decode-metric/"+fn, func(b *testing.B) { - total := 0 - - b.SetBytes(int64(len(buf) / promtestdataSampleCount)) - b.ReportAllocs() - b.ResetTimer() - - st := labels.NewSymbolTable() - for i := 0; i < b.N; i += promtestdataSampleCount { - p := parser(buf, st) - - Outer: - for i < b.N { - t, err := p.Next() - switch t { - case EntryInvalid: - if errors.Is(err, io.EOF) { - break Outer - } - b.Fatal(err) - case EntrySeries: - m, _, _ := p.Series() - - var res labels.Labels - p.Metric(&res) - - total += len(m) - i++ - } - } - } - _ = total - }) - b.Run(parserName+"/decode-metric-reuse/"+fn, func(b *testing.B) { - total := 0 - var res labels.Labels - - b.SetBytes(int64(len(buf) / promtestdataSampleCount)) - b.ReportAllocs() - b.ResetTimer() - - st := labels.NewSymbolTable() - for i := 0; i < b.N; i += promtestdataSampleCount { - p := parser(buf, st) - - Outer: - for i < b.N { - t, err := p.Next() - switch t { - case EntryInvalid: - if errors.Is(err, io.EOF) { - break Outer - } - b.Fatal(err) - case EntrySeries: - m, _, _ := p.Series() - - p.Metric(&res) - - total += len(m) - i++ - } - } - } - _ = total - }) - b.Run("expfmt-text/"+fn, func(b *testing.B) { - if parserName != "prometheus" { - b.Skip() - } - b.SetBytes(int64(len(buf) / promtestdataSampleCount)) - b.ReportAllocs() - b.ResetTimer() - - total := 0 - - for i := 0; i < b.N; i += promtestdataSampleCount { - decSamples := make(model.Vector, 0, 50) - sdec := expfmt.SampleDecoder{ - Dec: expfmt.NewDecoder(bytes.NewReader(buf), expfmt.NewFormat(expfmt.TypeTextPlain)), - Opts: &expfmt.DecodeOptions{ - Timestamp: model.TimeFromUnixNano(0), - }, - } - - for { - if err = sdec.Decode(&decSamples); err != nil { - break - } - total += len(decSamples) - decSamples = decSamples[:0] - } - } - _ = total - }) - } - } -} - -func BenchmarkGzip(b *testing.B) { - for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} { - b.Run(fn, func(b *testing.B) { - f, err := os.Open(fn) - require.NoError(b, err) - defer f.Close() - - var buf bytes.Buffer - gw := gzip.NewWriter(&buf) - - n, err := io.Copy(gw, f) - require.NoError(b, err) - require.NoError(b, gw.Close()) - - gbuf, err := io.ReadAll(&buf) - require.NoError(b, err) - - k := b.N / promtestdataSampleCount - - b.ReportAllocs() - b.SetBytes(n / promtestdataSampleCount) - b.ResetTimer() - - total := 0 - - for i := 0; i < k; i++ { - gr, err := gzip.NewReader(bytes.NewReader(gbuf)) - require.NoError(b, err) - - d, err := io.ReadAll(gr) - require.NoError(b, err) - require.NoError(b, gr.Close()) - - total += len(d) - } - _ = total - }) + require.EqualError(t, err, c.err, "test %d", i) } } diff --git a/model/textparse/protobufparse.go b/model/textparse/protobufparse.go index e384a75fca4..a77e1d728f3 100644 --- a/model/textparse/protobufparse.go +++ b/model/textparse/protobufparse.go @@ -20,7 +20,9 @@ import ( "fmt" "io" "math" + "strconv" "strings" + "sync" "unicode/utf8" "github.com/gogo/protobuf/proto" @@ -34,6 +36,15 @@ import ( dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" ) +// floatFormatBufPool is exclusively used in formatOpenMetricsFloat. +var floatFormatBufPool = sync.Pool{ + New: func() interface{} { + // To contain at most 17 digits and additional syntax for a float64. 
+ b := make([]byte, 0, 24) + return &b + }, +} + // ProtobufParser is a very inefficient way of unmarshaling the old Prometheus // protobuf format and then present it as it if were parsed by a // Prometheus-2-style text parser. This is only done so that we can easily plug @@ -457,6 +468,12 @@ func (p *ProtobufParser) Next() (Entry, error) { p.state = EntryHelp case EntryHelp: + if p.mf.Unit != "" { + p.state = EntryUnit + } else { + p.state = EntryType + } + case EntryUnit: p.state = EntryType case EntryType: t := p.mf.GetType() @@ -604,7 +621,7 @@ func readDelimited(b []byte, mf *dto.MetricFamily) (n int, err error) { return totalLength, mf.Unmarshal(b[varIntLength:totalLength]) } -// formatOpenMetricsFloat works like the usual Go string formatting of a fleat +// formatOpenMetricsFloat works like the usual Go string formatting of a float // but appends ".0" if the resulting number would otherwise contain neither a // "." nor an "e". func formatOpenMetricsFloat(f float64) string { @@ -623,11 +640,15 @@ func formatOpenMetricsFloat(f float64) string { case math.IsInf(f, -1): return "-Inf" } - s := fmt.Sprint(f) - if strings.ContainsAny(s, "e.") { - return s + bp := floatFormatBufPool.Get().(*[]byte) + defer floatFormatBufPool.Put(bp) + + *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) + if bytes.ContainsAny(*bp, "e.") { + return string(*bp) } - return s + ".0" + *bp = append(*bp, '.', '0') + return string(*bp) } // isNativeHistogram returns false iff the provided histograms has no spans at diff --git a/model/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go index cf34ae52df1..065459a69af 100644 --- a/model/textparse/protobufparse_test.go +++ b/model/textparse/protobufparse_test.go @@ -16,8 +16,6 @@ package textparse import ( "bytes" "encoding/binary" - "errors" - "io" "testing" "github.com/gogo/protobuf/proto" @@ -27,12 +25,12 @@ import ( "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/util/testutil" - dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" ) -func createTestProtoBuf(t *testing.T) *bytes.Buffer { +func createTestProtoBuf(t testing.TB) *bytes.Buffer { + t.Helper() + testMetricFamilies := []string{ `name: "go_build_info" help: "Build information about the main Go module." @@ -411,6 +409,49 @@ metric: < > > +`, + `name: "test_histogram3" +help: "Similar histogram as before but now with integer buckets." +type: HISTOGRAM +metric: < + histogram: < + sample_count: 6 + sample_sum: 50 + bucket: < + cumulative_count: 2 + upper_bound: -20 + > + bucket: < + cumulative_count: 4 + upper_bound: 20 + exemplar: < + label: < + name: "dummyID" + value: "59727" + > + value: 15 + timestamp: < + seconds: 1625851153 + nanos: 146848499 + > + > + > + bucket: < + cumulative_count: 6 + upper_bound: 30 + exemplar: < + label: < + name: "dummyID" + value: "5617" + > + value: 25 + > + > + schema: 0 + zero_threshold: 0 + > +> + `, `name: "test_histogram_family" help: "Test histogram metric family with two very simple histograms." 
@@ -783,32 +824,17 @@ metric: < } func TestProtobufParse(t *testing.T) { - type parseResult struct { - lset labels.Labels - m string - t int64 - v float64 - typ model.MetricType - help string - unit string - comment string - shs *histogram.Histogram - fhs *histogram.FloatHistogram - e []exemplar.Exemplar - ct int64 - } - inputBuf := createTestProtoBuf(t) scenarios := []struct { name string parser Parser - expected []parseResult + expected []parsedEntry }{ { name: "ignore classic buckets of native histograms", parser: NewProtobufParser(inputBuf.Bytes(), false, labels.NewSymbolTable()), - expected: []parseResult{ + expected: []parsedEntry{ { m: "go_build_info", help: "Build information about the main Go module.", @@ -830,6 +856,9 @@ func TestProtobufParse(t *testing.T) { { m: "go_memstats_alloc_bytes_total", help: "Total number of bytes allocated, even if freed.", + }, + { + m: "go_memstats_alloc_bytes_total", unit: "bytes", }, { @@ -842,7 +871,7 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "go_memstats_alloc_bytes_total", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "42"), Value: 12, HasTs: true, Ts: 1625851151233}, }, }, @@ -856,7 +885,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "something_untyped", - t: 1234567, + t: int64p(1234567), v: 42, lset: labels.FromStrings( "__name__", "something_untyped", @@ -872,7 +901,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_histogram", - t: 1234568, + t: int64p(1234568), shs: &histogram.Histogram{ Count: 175, ZeroCount: 2, @@ -893,7 +922,7 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_histogram", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, @@ -907,7 +936,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_gauge_histogram", - t: 1234568, + t: int64p(1234568), shs: &histogram.Histogram{ CounterResetHint: histogram.GaugeType, Count: 175, @@ -929,7 +958,7 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_gauge_histogram", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, @@ -943,7 +972,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_float_histogram", - t: 1234568, + t: int64p(1234568), fhs: &histogram.FloatHistogram{ Count: 175.0, ZeroCount: 2.0, @@ -964,7 +993,7 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_float_histogram", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, @@ -978,7 +1007,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_gauge_float_histogram", - t: 1234568, + t: int64p(1234568), fhs: &histogram.FloatHistogram{ CounterResetHint: histogram.GaugeType, Count: 175.0, @@ -1000,7 +1029,7 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_gauge_float_histogram", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, @@ -1041,7 +1070,7 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_histogram2_bucket", "le", "-0.00038", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), 
Value: -0.00038, HasTs: true, Ts: 1625851153146}, }, }, @@ -1052,7 +1081,7 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_histogram2_bucket", "le", "1.0", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.000295, HasTs: false}, }, }, @@ -1064,6 +1093,66 @@ func TestProtobufParse(t *testing.T) { "le", "+Inf", ), }, + { + m: "test_histogram3", + help: "Similar histogram as before but now with integer buckets.", + }, + { + m: "test_histogram3", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram3_count", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram3_count", + ), + }, + { + m: "test_histogram3_sum", + v: 50, + lset: labels.FromStrings( + "__name__", "test_histogram3_sum", + ), + }, + { + m: "test_histogram3_bucket\xffle\xff-20.0", + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram3_bucket", + "le", "-20.0", + ), + }, + { + m: "test_histogram3_bucket\xffle\xff20.0", + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram3_bucket", + "le", "20.0", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: 15, HasTs: true, Ts: 1625851153146}, + }, + }, + { + m: "test_histogram3_bucket\xffle\xff30.0", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram3_bucket", + "le", "30.0", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: 25, HasTs: false}, + }, + }, + { + m: "test_histogram3_bucket\xffle\xff+Inf", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram3_bucket", + "le", "+Inf", + ), + }, { m: "test_histogram_family", help: "Test histogram metric family with two very simple histograms.", @@ -1235,7 +1324,7 @@ func TestProtobufParse(t *testing.T) { { m: "test_counter_with_createdtimestamp", v: 42, - ct: 1000, + ct: int64p(1000), lset: labels.FromStrings( "__name__", "test_counter_with_createdtimestamp", ), @@ -1251,7 +1340,7 @@ func TestProtobufParse(t *testing.T) { { m: "test_summary_with_createdtimestamp_count", v: 42, - ct: 1000, + ct: int64p(1000), lset: labels.FromStrings( "__name__", "test_summary_with_createdtimestamp_count", ), @@ -1259,7 +1348,7 @@ func TestProtobufParse(t *testing.T) { { m: "test_summary_with_createdtimestamp_sum", v: 1.234, - ct: 1000, + ct: int64p(1000), lset: labels.FromStrings( "__name__", "test_summary_with_createdtimestamp_sum", ), @@ -1274,7 +1363,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_histogram_with_createdtimestamp", - ct: 1000, + ct: int64p(1000), shs: &histogram.Histogram{ CounterResetHint: histogram.UnknownCounterReset, PositiveSpans: []histogram.Span{}, @@ -1294,7 +1383,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_gaugehistogram_with_createdtimestamp", - ct: 1000, + ct: int64p(1000), shs: &histogram.Histogram{ CounterResetHint: histogram.GaugeType, PositiveSpans: []histogram.Span{}, @@ -1314,7 +1403,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_histogram_with_native_histogram_exemplars", - t: 1234568, + t: int64p(1234568), shs: &histogram.Histogram{ Count: 175, ZeroCount: 2, @@ -1335,7 +1424,7 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, {Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156}, }, @@ -1350,7 
+1439,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_histogram_with_native_histogram_exemplars2", - t: 1234568, + t: int64p(1234568), shs: &histogram.Histogram{ Count: 175, ZeroCount: 2, @@ -1371,7 +1460,7 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars2", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, @@ -1380,16 +1469,16 @@ func TestProtobufParse(t *testing.T) { { name: "parse classic and native buckets", parser: NewProtobufParser(inputBuf.Bytes(), true, labels.NewSymbolTable()), - expected: []parseResult{ - { // 0 + expected: []parsedEntry{ + { m: "go_build_info", help: "Build information about the main Go module.", }, - { // 1 + { m: "go_build_info", typ: model.MetricTypeGauge, }, - { // 2 + { m: "go_build_info\xFFchecksum\xFF\xFFpath\xFFgithub.com/prometheus/client_golang\xFFversion\xFF(devel)", v: 1, lset: labels.FromStrings( @@ -1399,51 +1488,55 @@ func TestProtobufParse(t *testing.T) { "version", "(devel)", ), }, - { // 3 + { m: "go_memstats_alloc_bytes_total", help: "Total number of bytes allocated, even if freed.", }, - { // 4 + { + m: "go_memstats_alloc_bytes_total", + unit: "bytes", + }, + { m: "go_memstats_alloc_bytes_total", typ: model.MetricTypeCounter, }, - { // 5 + { m: "go_memstats_alloc_bytes_total", v: 1.546544e+06, lset: labels.FromStrings( "__name__", "go_memstats_alloc_bytes_total", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "42"), Value: 12, HasTs: true, Ts: 1625851151233}, }, }, - { // 6 + { m: "something_untyped", help: "Just to test the untyped type.", }, - { // 7 + { m: "something_untyped", typ: model.MetricTypeUnknown, }, - { // 8 + { m: "something_untyped", - t: 1234567, + t: int64p(1234567), v: 42, lset: labels.FromStrings( "__name__", "something_untyped", ), }, - { // 9 + { m: "test_histogram", help: "Test histogram with many buckets removed to keep it manageable in size.", }, - { // 10 + { m: "test_histogram", typ: model.MetricTypeHistogram, }, - { // 11 + { m: "test_histogram", - t: 1234568, + t: int64p(1234568), shs: &histogram.Histogram{ Count: 175, ZeroCount: 2, @@ -1464,79 +1557,79 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_histogram", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 12 + { m: "test_histogram_count", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_histogram_count", ), }, - { // 13 + { m: "test_histogram_sum", - t: 1234568, + t: int64p(1234568), v: 0.0008280461746287094, lset: labels.FromStrings( "__name__", "test_histogram_sum", ), }, - { // 14 + { m: "test_histogram_bucket\xffle\xff-0.0004899999999999998", - t: 1234568, + t: int64p(1234568), v: 2, lset: labels.FromStrings( "__name__", "test_histogram_bucket", "le", "-0.0004899999999999998", ), }, - { // 15 + { m: "test_histogram_bucket\xffle\xff-0.0003899999999999998", - t: 1234568, + t: int64p(1234568), v: 4, lset: labels.FromStrings( "__name__", "test_histogram_bucket", "le", "-0.0003899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 16 + { m: "test_histogram_bucket\xffle\xff-0.0002899999999999998", 
- t: 1234568, + t: int64p(1234568), v: 16, lset: labels.FromStrings( "__name__", "test_histogram_bucket", "le", "-0.0002899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, - { // 17 + { m: "test_histogram_bucket\xffle\xff+Inf", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_histogram_bucket", "le", "+Inf", ), }, - { // 18 + { m: "test_gauge_histogram", help: "Like test_histogram but as gauge histogram.", }, - { // 19 + { m: "test_gauge_histogram", typ: model.MetricTypeGaugeHistogram, }, - { // 20 + { m: "test_gauge_histogram", - t: 1234568, + t: int64p(1234568), shs: &histogram.Histogram{ CounterResetHint: histogram.GaugeType, Count: 175, @@ -1558,79 +1651,79 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_gauge_histogram", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 21 + { m: "test_gauge_histogram_count", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_gauge_histogram_count", ), }, - { // 22 + { m: "test_gauge_histogram_sum", - t: 1234568, + t: int64p(1234568), v: 0.0008280461746287094, lset: labels.FromStrings( "__name__", "test_gauge_histogram_sum", ), }, - { // 23 + { m: "test_gauge_histogram_bucket\xffle\xff-0.0004899999999999998", - t: 1234568, + t: int64p(1234568), v: 2, lset: labels.FromStrings( "__name__", "test_gauge_histogram_bucket", "le", "-0.0004899999999999998", ), }, - { // 24 + { m: "test_gauge_histogram_bucket\xffle\xff-0.0003899999999999998", - t: 1234568, + t: int64p(1234568), v: 4, lset: labels.FromStrings( "__name__", "test_gauge_histogram_bucket", "le", "-0.0003899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 25 + { m: "test_gauge_histogram_bucket\xffle\xff-0.0002899999999999998", - t: 1234568, + t: int64p(1234568), v: 16, lset: labels.FromStrings( "__name__", "test_gauge_histogram_bucket", "le", "-0.0002899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, - { // 26 + { m: "test_gauge_histogram_bucket\xffle\xff+Inf", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_gauge_histogram_bucket", "le", "+Inf", ), }, - { // 27 + { m: "test_float_histogram", help: "Test float histogram with many buckets removed to keep it manageable in size.", }, - { // 28 + { m: "test_float_histogram", typ: model.MetricTypeHistogram, }, - { // 29 + { m: "test_float_histogram", - t: 1234568, + t: int64p(1234568), fhs: &histogram.FloatHistogram{ Count: 175.0, ZeroCount: 2.0, @@ -1651,79 +1744,79 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_float_histogram", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 30 + { m: "test_float_histogram_count", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_float_histogram_count", ), }, - { // 31 + { m: "test_float_histogram_sum", - t: 1234568, + t: int64p(1234568), v: 0.0008280461746287094, lset: labels.FromStrings( "__name__", 
"test_float_histogram_sum", ), }, - { // 32 + { m: "test_float_histogram_bucket\xffle\xff-0.0004899999999999998", - t: 1234568, + t: int64p(1234568), v: 2, lset: labels.FromStrings( "__name__", "test_float_histogram_bucket", "le", "-0.0004899999999999998", ), }, - { // 33 + { m: "test_float_histogram_bucket\xffle\xff-0.0003899999999999998", - t: 1234568, + t: int64p(1234568), v: 4, lset: labels.FromStrings( "__name__", "test_float_histogram_bucket", "le", "-0.0003899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 34 + { m: "test_float_histogram_bucket\xffle\xff-0.0002899999999999998", - t: 1234568, + t: int64p(1234568), v: 16, lset: labels.FromStrings( "__name__", "test_float_histogram_bucket", "le", "-0.0002899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, - { // 35 + { m: "test_float_histogram_bucket\xffle\xff+Inf", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_float_histogram_bucket", "le", "+Inf", ), }, - { // 36 + { m: "test_gauge_float_histogram", help: "Like test_float_histogram but as gauge histogram.", }, - { // 37 + { m: "test_gauge_float_histogram", typ: model.MetricTypeGaugeHistogram, }, - { // 38 + { m: "test_gauge_float_histogram", - t: 1234568, + t: int64p(1234568), fhs: &histogram.FloatHistogram{ CounterResetHint: histogram.GaugeType, Count: 175.0, @@ -1745,91 +1838,91 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_gauge_float_histogram", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 39 + { m: "test_gauge_float_histogram_count", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_gauge_float_histogram_count", ), }, - { // 40 + { m: "test_gauge_float_histogram_sum", - t: 1234568, + t: int64p(1234568), v: 0.0008280461746287094, lset: labels.FromStrings( "__name__", "test_gauge_float_histogram_sum", ), }, - { // 41 + { m: "test_gauge_float_histogram_bucket\xffle\xff-0.0004899999999999998", - t: 1234568, + t: int64p(1234568), v: 2, lset: labels.FromStrings( "__name__", "test_gauge_float_histogram_bucket", "le", "-0.0004899999999999998", ), }, - { // 42 + { m: "test_gauge_float_histogram_bucket\xffle\xff-0.0003899999999999998", - t: 1234568, + t: int64p(1234568), v: 4, lset: labels.FromStrings( "__name__", "test_gauge_float_histogram_bucket", "le", "-0.0003899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 43 + { m: "test_gauge_float_histogram_bucket\xffle\xff-0.0002899999999999998", - t: 1234568, + t: int64p(1234568), v: 16, lset: labels.FromStrings( "__name__", "test_gauge_float_histogram_bucket", "le", "-0.0002899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, - { // 44 + { m: "test_gauge_float_histogram_bucket\xffle\xff+Inf", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_gauge_float_histogram_bucket", "le", "+Inf", ), }, - { // 45 + { m: "test_histogram2", help: "Similar histogram as before but now without sparse 
buckets.", }, - { // 46 + { m: "test_histogram2", typ: model.MetricTypeHistogram, }, - { // 47 + { m: "test_histogram2_count", v: 175, lset: labels.FromStrings( "__name__", "test_histogram2_count", ), }, - { // 48 + { m: "test_histogram2_sum", v: 0.000828, lset: labels.FromStrings( "__name__", "test_histogram2_sum", ), }, - { // 49 + { m: "test_histogram2_bucket\xffle\xff-0.00048", v: 2, lset: labels.FromStrings( @@ -1837,29 +1930,29 @@ func TestProtobufParse(t *testing.T) { "le", "-0.00048", ), }, - { // 50 + { m: "test_histogram2_bucket\xffle\xff-0.00038", v: 4, lset: labels.FromStrings( "__name__", "test_histogram2_bucket", "le", "-0.00038", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146}, }, }, - { // 51 + { m: "test_histogram2_bucket\xffle\xff1.0", v: 16, lset: labels.FromStrings( "__name__", "test_histogram2_bucket", "le", "1.0", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.000295, HasTs: false}, }, }, - { // 52 + { m: "test_histogram2_bucket\xffle\xff+Inf", v: 175, lset: labels.FromStrings( @@ -1867,15 +1960,75 @@ func TestProtobufParse(t *testing.T) { "le", "+Inf", ), }, - { // 53 + { + m: "test_histogram3", + help: "Similar histogram as before but now with integer buckets.", + }, + { + m: "test_histogram3", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram3_count", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram3_count", + ), + }, + { + m: "test_histogram3_sum", + v: 50, + lset: labels.FromStrings( + "__name__", "test_histogram3_sum", + ), + }, + { + m: "test_histogram3_bucket\xffle\xff-20.0", + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram3_bucket", + "le", "-20.0", + ), + }, + { + m: "test_histogram3_bucket\xffle\xff20.0", + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram3_bucket", + "le", "20.0", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: 15, HasTs: true, Ts: 1625851153146}, + }, + }, + { + m: "test_histogram3_bucket\xffle\xff30.0", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram3_bucket", + "le", "30.0", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: 25, HasTs: false}, + }, + }, + { + m: "test_histogram3_bucket\xffle\xff+Inf", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram3_bucket", + "le", "+Inf", + ), + }, + { m: "test_histogram_family", help: "Test histogram metric family with two very simple histograms.", }, - { // 54 + { m: "test_histogram_family", typ: model.MetricTypeHistogram, }, - { // 55 + { m: "test_histogram_family\xfffoo\xffbar", shs: &histogram.Histogram{ CounterResetHint: histogram.UnknownCounterReset, @@ -1893,7 +2046,7 @@ func TestProtobufParse(t *testing.T) { "foo", "bar", ), }, - { // 56 + { m: "test_histogram_family_count\xfffoo\xffbar", v: 5, lset: labels.FromStrings( @@ -1901,7 +2054,7 @@ func TestProtobufParse(t *testing.T) { "foo", "bar", ), }, - { // 57 + { m: "test_histogram_family_sum\xfffoo\xffbar", v: 12.1, lset: labels.FromStrings( @@ -1909,7 +2062,7 @@ func TestProtobufParse(t *testing.T) { "foo", "bar", ), }, - { // 58 + { m: "test_histogram_family_bucket\xfffoo\xffbar\xffle\xff1.1", v: 2, lset: labels.FromStrings( @@ -1918,7 +2071,7 @@ func TestProtobufParse(t *testing.T) { "le", "1.1", ), }, - { // 59 + { m: "test_histogram_family_bucket\xfffoo\xffbar\xffle\xff2.2", v: 3, lset: 
labels.FromStrings( @@ -1927,7 +2080,7 @@ func TestProtobufParse(t *testing.T) { "le", "2.2", ), }, - { // 60 + { m: "test_histogram_family_bucket\xfffoo\xffbar\xffle\xff+Inf", v: 5, lset: labels.FromStrings( @@ -1936,7 +2089,7 @@ func TestProtobufParse(t *testing.T) { "le", "+Inf", ), }, - { // 61 + { m: "test_histogram_family\xfffoo\xffbaz", shs: &histogram.Histogram{ CounterResetHint: histogram.UnknownCounterReset, @@ -1954,7 +2107,7 @@ func TestProtobufParse(t *testing.T) { "foo", "baz", ), }, - { // 62 + { m: "test_histogram_family_count\xfffoo\xffbaz", v: 6, lset: labels.FromStrings( @@ -1962,7 +2115,7 @@ func TestProtobufParse(t *testing.T) { "foo", "baz", ), }, - { // 63 + { m: "test_histogram_family_sum\xfffoo\xffbaz", v: 13.1, lset: labels.FromStrings( @@ -1970,7 +2123,7 @@ func TestProtobufParse(t *testing.T) { "foo", "baz", ), }, - { // 64 + { m: "test_histogram_family_bucket\xfffoo\xffbaz\xffle\xff1.1", v: 1, lset: labels.FromStrings( @@ -1979,7 +2132,7 @@ func TestProtobufParse(t *testing.T) { "le", "1.1", ), }, - { // 65 + { m: "test_histogram_family_bucket\xfffoo\xffbaz\xffle\xff2.2", v: 5, lset: labels.FromStrings( @@ -1988,7 +2141,7 @@ func TestProtobufParse(t *testing.T) { "le", "2.2", ), }, - { // 66 + { m: "test_histogram_family_bucket\xfffoo\xffbaz\xffle\xff+Inf", v: 6, lset: labels.FromStrings( @@ -1997,15 +2150,15 @@ func TestProtobufParse(t *testing.T) { "le", "+Inf", ), }, - { // 67 + { m: "test_float_histogram_with_zerothreshold_zero", help: "Test float histogram with a zero threshold of zero.", }, - { // 68 + { m: "test_float_histogram_with_zerothreshold_zero", typ: model.MetricTypeHistogram, }, - { // 69 + { m: "test_float_histogram_with_zerothreshold_zero", fhs: &histogram.FloatHistogram{ Count: 5.0, @@ -2021,15 +2174,15 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_float_histogram_with_zerothreshold_zero", ), }, - { // 70 + { m: "rpc_durations_seconds", help: "RPC latency distributions.", }, - { // 71 + { m: "rpc_durations_seconds", typ: model.MetricTypeSummary, }, - { // 72 + { m: "rpc_durations_seconds_count\xffservice\xffexponential", v: 262, lset: labels.FromStrings( @@ -2037,7 +2190,7 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 73 + { m: "rpc_durations_seconds_sum\xffservice\xffexponential", v: 0.00025551262820703587, lset: labels.FromStrings( @@ -2045,7 +2198,7 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 74 + { m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.5", v: 6.442786329648548e-07, lset: labels.FromStrings( @@ -2054,7 +2207,7 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 75 + { m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.9", v: 1.9435742936658396e-06, lset: labels.FromStrings( @@ -2063,7 +2216,7 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 76 + { m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.99", v: 4.0471608667037015e-06, lset: labels.FromStrings( @@ -2072,37 +2225,37 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 77 + { m: "without_quantiles", help: "A summary without quantiles.", }, - { // 78 + { m: "without_quantiles", typ: model.MetricTypeSummary, }, - { // 79 + { m: "without_quantiles_count", v: 42, lset: labels.FromStrings( "__name__", "without_quantiles_count", ), }, - { // 80 + { m: "without_quantiles_sum", v: 1.234, lset: labels.FromStrings( "__name__", "without_quantiles_sum", ), 
}, - { // 81 + { m: "empty_histogram", help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram.", }, - { // 82 + { m: "empty_histogram", typ: model.MetricTypeHistogram, }, - { // 83 + { m: "empty_histogram", shs: &histogram.Histogram{ CounterResetHint: histogram.UnknownCounterReset, @@ -2113,57 +2266,57 @@ func TestProtobufParse(t *testing.T) { "__name__", "empty_histogram", ), }, - { // 84 + { m: "test_counter_with_createdtimestamp", help: "A counter with a created timestamp.", }, - { // 85 + { m: "test_counter_with_createdtimestamp", typ: model.MetricTypeCounter, }, - { // 86 + { m: "test_counter_with_createdtimestamp", v: 42, - ct: 1000, + ct: int64p(1000), lset: labels.FromStrings( "__name__", "test_counter_with_createdtimestamp", ), }, - { // 87 + { m: "test_summary_with_createdtimestamp", help: "A summary with a created timestamp.", }, - { // 88 + { m: "test_summary_with_createdtimestamp", typ: model.MetricTypeSummary, }, - { // 89 + { m: "test_summary_with_createdtimestamp_count", v: 42, - ct: 1000, + ct: int64p(1000), lset: labels.FromStrings( "__name__", "test_summary_with_createdtimestamp_count", ), }, - { // 90 + { m: "test_summary_with_createdtimestamp_sum", v: 1.234, - ct: 1000, + ct: int64p(1000), lset: labels.FromStrings( "__name__", "test_summary_with_createdtimestamp_sum", ), }, - { // 91 + { m: "test_histogram_with_createdtimestamp", help: "A histogram with a created timestamp.", }, - { // 92 + { m: "test_histogram_with_createdtimestamp", typ: model.MetricTypeHistogram, }, - { // 93 + { m: "test_histogram_with_createdtimestamp", - ct: 1000, + ct: int64p(1000), shs: &histogram.Histogram{ CounterResetHint: histogram.UnknownCounterReset, PositiveSpans: []histogram.Span{}, @@ -2173,17 +2326,17 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_histogram_with_createdtimestamp", ), }, - { // 94 + { m: "test_gaugehistogram_with_createdtimestamp", help: "A gauge histogram with a created timestamp.", }, - { // 95 + { m: "test_gaugehistogram_with_createdtimestamp", typ: model.MetricTypeGaugeHistogram, }, - { // 96 + { m: "test_gaugehistogram_with_createdtimestamp", - ct: 1000, + ct: int64p(1000), shs: &histogram.Histogram{ CounterResetHint: histogram.GaugeType, PositiveSpans: []histogram.Span{}, @@ -2193,17 +2346,17 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_gaugehistogram_with_createdtimestamp", ), }, - { // 97 + { m: "test_histogram_with_native_histogram_exemplars", help: "A histogram with native histogram exemplars.", }, - { // 98 + { m: "test_histogram_with_native_histogram_exemplars", typ: model.MetricTypeHistogram, }, - { // 99 + { m: "test_histogram_with_native_histogram_exemplars", - t: 1234568, + t: int64p(1234568), shs: &histogram.Histogram{ Count: 175, ZeroCount: 2, @@ -2224,80 +2377,80 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, {Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156}, }, }, - { // 100 + { m: "test_histogram_with_native_histogram_exemplars_count", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars_count", ), }, - { // 101 + { m: "test_histogram_with_native_histogram_exemplars_sum", - t: 1234568, + 
t: int64p(1234568), v: 0.0008280461746287094, lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars_sum", ), }, - { // 102 + { m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0004899999999999998", - t: 1234568, + t: int64p(1234568), v: 2, lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars_bucket", "le", "-0.0004899999999999998", ), }, - { // 103 + { m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0003899999999999998", - t: 1234568, + t: int64p(1234568), v: 4, lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars_bucket", "le", "-0.0003899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 104 + { m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0002899999999999998", - t: 1234568, + t: int64p(1234568), v: 16, lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars_bucket", "le", "-0.0002899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, - { // 105 + { m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff+Inf", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars_bucket", "le", "+Inf", ), }, - { // 106 + { m: "test_histogram_with_native_histogram_exemplars2", help: "Another histogram with native histogram exemplars.", }, - { // 107 + { m: "test_histogram_with_native_histogram_exemplars2", typ: model.MetricTypeHistogram, }, - { // 108 + { m: "test_histogram_with_native_histogram_exemplars2", - t: 1234568, + t: int64p(1234568), shs: &histogram.Histogram{ Count: 175, ZeroCount: 2, @@ -2318,56 +2471,56 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars2", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 109 + { m: "test_histogram_with_native_histogram_exemplars2_count", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars2_count", ), }, - { // 110 + { m: "test_histogram_with_native_histogram_exemplars2_sum", - t: 1234568, + t: int64p(1234568), v: 0.0008280461746287094, lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars2_sum", ), }, - { // 111 + { m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0004899999999999998", - t: 1234568, + t: int64p(1234568), v: 2, lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars2_bucket", "le", "-0.0004899999999999998", ), }, - { // 112 + { m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0003899999999999998", - t: 1234568, + t: int64p(1234568), v: 4, lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars2_bucket", "le", "-0.0003899999999999998", ), }, - { // 113 + { m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0002899999999999998", - t: 1234568, + t: int64p(1234568), v: 16, lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars2_bucket", "le", "-0.0002899999999999998", ), }, - { // 114 
+ { m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff+Inf", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars2_bucket", @@ -2381,94 +2534,11 @@ func TestProtobufParse(t *testing.T) { for _, scenario := range scenarios { t.Run(scenario.name, func(t *testing.T) { var ( - i int - res labels.Labels p = scenario.parser exp = scenario.expected ) - - for { - et, err := p.Next() - if errors.Is(err, io.EOF) { - break - } - require.NoError(t, err) - - switch et { - case EntrySeries: - m, ts, v := p.Series() - - var e exemplar.Exemplar - p.Metric(&res) - eFound := p.Exemplar(&e) - ct := p.CreatedTimestamp() - require.Equal(t, exp[i].m, string(m), "i: %d", i) - if ts != nil { - require.Equal(t, exp[i].t, *ts, "i: %d", i) - } else { - require.Equal(t, int64(0), exp[i].t, "i: %d", i) - } - require.Equal(t, exp[i].v, v, "i: %d", i) - testutil.RequireEqual(t, exp[i].lset, res, "i: %d", i) - if len(exp[i].e) == 0 { - require.False(t, eFound, "i: %d", i) - } else { - require.True(t, eFound, "i: %d", i) - testutil.RequireEqual(t, exp[i].e[0], e, "i: %d", i) - require.False(t, p.Exemplar(&e), "too many exemplars returned, i: %d", i) - } - if exp[i].ct != 0 { - require.NotNilf(t, ct, "i: %d", i) - require.Equal(t, exp[i].ct, *ct, "i: %d", i) - } else { - require.Nilf(t, ct, "i: %d", i) - } - - case EntryHistogram: - m, ts, shs, fhs := p.Histogram() - p.Metric(&res) - require.Equal(t, exp[i].m, string(m), "i: %d", i) - if ts != nil { - require.Equal(t, exp[i].t, *ts, "i: %d", i) - } else { - require.Equal(t, int64(0), exp[i].t, "i: %d", i) - } - testutil.RequireEqual(t, exp[i].lset, res, "i: %d", i) - require.Equal(t, exp[i].m, string(m), "i: %d", i) - if shs != nil { - require.Equal(t, exp[i].shs, shs, "i: %d", i) - } else { - require.Equal(t, exp[i].fhs, fhs, "i: %d", i) - } - j := 0 - for e := (exemplar.Exemplar{}); p.Exemplar(&e); j++ { - testutil.RequireEqual(t, exp[i].e[j], e, "i: %d", i) - e = exemplar.Exemplar{} - } - require.Len(t, exp[i].e, j, "not enough exemplars found, i: %d", i) - - case EntryType: - m, typ := p.Type() - require.Equal(t, exp[i].m, string(m), "i: %d", i) - require.Equal(t, exp[i].typ, typ, "i: %d", i) - - case EntryHelp: - m, h := p.Help() - require.Equal(t, exp[i].m, string(m), "i: %d", i) - require.Equal(t, exp[i].help, string(h), "i: %d", i) - - case EntryUnit: - m, u := p.Unit() - require.Equal(t, exp[i].m, string(m), "i: %d", i) - require.Equal(t, exp[i].unit, string(u), "i: %d", i) - - case EntryComment: - require.Equal(t, exp[i].comment, string(p.Comment()), "i: %d", i) - } - - i++ - } - require.Len(t, exp, i) + got := testParse(t, p) + requireEntries(t, exp, got) }) } } diff --git a/model/textparse/testdata/omhistogramdata.txt b/model/textparse/testdata/omhistogramdata.txt new file mode 100644 index 00000000000..18761683553 --- /dev/null +++ b/model/textparse/testdata/omhistogramdata.txt @@ -0,0 +1,45 @@ +# HELP golang_manual_histogram_seconds This is a histogram with manually selected parameters +# TYPE golang_manual_histogram_seconds histogram +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5001",le="0.005"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5001",le="0.01"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5001",le="0.025"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5001",le="0.05"} 0 
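[Editor's note, illustration only — not part of the patch] The expected test entries above switch from zero-valued timestamps (t: 1234568, ct: 1000) to pointers built with int64p, so an expectation can distinguish "no timestamp" (nil) from a real timestamp, and the hand-rolled assertion loop is replaced by the shared testParse/requireEntries helpers. Those helpers are defined elsewhere in this change; a minimal sketch of what the pointer helper and a pointer-aware assertion typically look like (requireTimestamp is a hypothetical name; testify's require package, already used by these tests, is assumed):

package textparse

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// int64p returns a pointer to i, so test expectations can use nil to mean
// "no timestamp expected" instead of overloading the zero value.
func int64p(i int64) *int64 { return &i }

// requireTimestamp (hypothetical helper) compares an optional expected
// timestamp against the parser's result; nil means none was expected.
func requireTimestamp(t *testing.T, want, got *int64) {
	t.Helper()
	if want == nil {
		require.Nil(t, got)
		return
	}
	require.NotNil(t, got)
	require.Equal(t, *want, *got)
}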
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5001",le="0.1"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5001",le="0.25"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5001",le="0.5"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5001",le="1.0"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5001",le="2.5"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5001",le="5.0"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5001",le="10.0"} 1 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5001",le="+Inf"} 1 +golang_manual_histogram_seconds_sum{address="0.0.0.0",generation="20",port="5001"} 10.0 +golang_manual_histogram_seconds_count{address="0.0.0.0",generation="20",port="5001"} 1 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5002",le="0.005"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5002",le="0.01"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5002",le="0.025"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5002",le="0.05"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5002",le="0.1"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5002",le="0.25"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5002",le="0.5"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5002",le="1.0"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5002",le="2.5"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5002",le="5.0"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5002",le="10.0"} 1 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5002",le="+Inf"} 1 +golang_manual_histogram_seconds_sum{address="0.0.0.0",generation="20",port="5002"} 10.0 +golang_manual_histogram_seconds_count{address="0.0.0.0",generation="20",port="5002"} 1 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5003",le="0.005"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5003",le="0.01"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5003",le="0.025"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5003",le="0.05"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5003",le="0.1"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5003",le="0.25"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5003",le="0.5"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5003",le="1.0"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5003",le="2.5"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5003",le="5.0"} 0 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5003",le="10.0"} 1 +golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5003",le="+Inf"} 1 
+golang_manual_histogram_seconds_sum{address="0.0.0.0",generation="20",port="5003"} 10.0 +golang_manual_histogram_seconds_count{address="0.0.0.0",generation="20",port="5003"} 1 +# EOF \ No newline at end of file diff --git a/model/textparse/testdata/omtestdata.txt b/model/textparse/testdata/omtestdata.txt new file mode 100644 index 00000000000..0f5f78b8b9e --- /dev/null +++ b/model/textparse/testdata/omtestdata.txt @@ -0,0 +1,64 @@ +# HELP go_build_info Build information about the main Go module. +# TYPE go_build_info gauge +go_build_info{checksum="",path="",version=""} 1.0 +# HELP promhttp_metric_handler_errors Total number of internal errors encountered by the promhttp metric handler. +# TYPE promhttp_metric_handler_errors counter +promhttp_metric_handler_errors_total{cause="encoding"} 0.0 +promhttp_metric_handler_errors_created{cause="encoding"} 1.726839813016397e+09 +promhttp_metric_handler_errors_total{cause="gathering"} 0.0 +promhttp_metric_handler_errors_created{cause="gathering"} 1.726839813016395e+09 +# HELP rpc_durations_histogram_seconds RPC latency distributions. +# TYPE rpc_durations_histogram_seconds histogram +rpc_durations_histogram_seconds_bucket{le="-0.00099"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.00089"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0007899999999999999"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0006899999999999999"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0005899999999999998"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0004899999999999998"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0003899999999999998"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0002899999999999998"} 3 # {dummyID="17783"} -0.0003825067330956884 1.7268398142239082e+09 +rpc_durations_histogram_seconds_bucket{le="-0.0001899999999999998"} 5 # {dummyID="84741"} -0.00020178290006788965 1.726839814829977e+09 +rpc_durations_histogram_seconds_bucket{le="-8.999999999999979e-05"} 5 +rpc_durations_histogram_seconds_bucket{le="1.0000000000000216e-05"} 8 # {dummyID="19206"} -4.6156147425468016e-05 1.7268398151337721e+09 +rpc_durations_histogram_seconds_bucket{le="0.00011000000000000022"} 9 # {dummyID="3974"} 9.528436760156754e-05 1.726839814526797e+09 +rpc_durations_histogram_seconds_bucket{le="0.00021000000000000023"} 11 # {dummyID="29640"} 0.00017459624183458996 1.7268398139220061e+09 +rpc_durations_histogram_seconds_bucket{le="0.0003100000000000002"} 15 # {dummyID="9818"} 0.0002791130914009552 1.7268398149821382e+09 +rpc_durations_histogram_seconds_bucket{le="0.0004100000000000002"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0005100000000000003"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0006100000000000003"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0007100000000000003"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0008100000000000004"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0009100000000000004"} 15 +rpc_durations_histogram_seconds_bucket{le="+Inf"} 15 +rpc_durations_histogram_seconds_sum -8.452185437166741e-05 +rpc_durations_histogram_seconds_count 15 +rpc_durations_histogram_seconds_created 1.726839813016302e+09 +# HELP rpc_durations_seconds RPC latency distributions. 
+# TYPE rpc_durations_seconds summary +rpc_durations_seconds{service="exponential",quantile="0.5"} 7.689368882420941e-07 +rpc_durations_seconds{service="exponential",quantile="0.9"} 1.6537614174305048e-06 +rpc_durations_seconds{service="exponential",quantile="0.99"} 2.0965499063061924e-06 +rpc_durations_seconds_sum{service="exponential"} 2.0318666372575776e-05 +rpc_durations_seconds_count{service="exponential"} 22 +rpc_durations_seconds_created{service="exponential"} 1.7268398130168908e+09 +rpc_durations_seconds{service="normal",quantile="0.5"} -5.066758674917046e-06 +rpc_durations_seconds{service="normal",quantile="0.9"} 0.0002935723711788224 +rpc_durations_seconds{service="normal",quantile="0.99"} 0.0003023094636293776 +rpc_durations_seconds_sum{service="normal"} -8.452185437166741e-05 +rpc_durations_seconds_count{service="normal"} 15 +rpc_durations_seconds_created{service="normal"} 1.726839813016714e+09 +rpc_durations_seconds{service="uniform",quantile="0.5"} 9.005014931474918e-05 +rpc_durations_seconds{service="uniform",quantile="0.9"} 0.00017801230208182325 +rpc_durations_seconds{service="uniform",quantile="0.99"} 0.00018641524538180192 +rpc_durations_seconds_sum{service="uniform"} 0.0011666095700533677 +rpc_durations_seconds_count{service="uniform"} 11 +rpc_durations_seconds_created{service="uniform"} 1.72683981301684e+09 +# HELP rpc_requests Total number of RPC requests received. +# TYPE rpc_requests counter +rpc_requests_total{service="exponential"} 22.0 +rpc_requests_created{service="exponential"} 1.726839813016893e+09 +rpc_requests_total{service="normal"} 15.0 +rpc_requests_created{service="normal"} 1.726839813016717e+09 +rpc_requests_total{service="uniform"} 11.0 +rpc_requests_created{service="uniform"} 1.7268398130168471e+09 +# EOF diff --git a/model/textparse/promtestdata.nometa.txt b/model/textparse/testdata/promtestdata.nometa.txt similarity index 100% rename from model/textparse/promtestdata.nometa.txt rename to model/textparse/testdata/promtestdata.nometa.txt diff --git a/model/textparse/promtestdata.txt b/model/textparse/testdata/promtestdata.txt similarity index 100% rename from model/textparse/promtestdata.txt rename to model/textparse/testdata/promtestdata.txt diff --git a/notifier/notifier.go b/notifier/notifier.go index 218e4cb8c74..e970b67e6d2 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -16,25 +16,28 @@ package notifier import ( "bytes" "context" + "crypto/md5" + "encoding/hex" "encoding/json" "fmt" "io" + "log/slog" "net/http" "net/url" "path" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/go-openapi/strfmt" "github.com/prometheus/alertmanager/api/v2/models" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/sigv4" "github.com/prometheus/common/version" "go.uber.org/atomic" + "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -117,7 +120,7 @@ type Manager struct { stopRequested chan struct{} alertmanagers map[string]*alertmanagerSet - logger log.Logger + logger *slog.Logger } // Options are the configurable parameters of a Handler. @@ -218,12 +221,12 @@ func do(ctx context.Context, client *http.Client, req *http.Request) (*http.Resp } // NewManager is the manager constructor. 
-func NewManager(o *Options, logger log.Logger) *Manager { +func NewManager(o *Options, logger *slog.Logger) *Manager { if o.Do == nil { o.Do = do } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } n := &Manager{ @@ -257,6 +260,16 @@ func (n *Manager) ApplyConfig(conf *config.Config) error { n.opts.RelabelConfigs = conf.AlertingConfig.AlertRelabelConfigs amSets := make(map[string]*alertmanagerSet) + // configToAlertmanagers maps alertmanager sets for each unique AlertmanagerConfig, + // helping to avoid dropping known alertmanagers and re-use them without waiting for SD updates when applying the config. + configToAlertmanagers := make(map[string]*alertmanagerSet, len(n.alertmanagers)) + for _, oldAmSet := range n.alertmanagers { + hash, err := oldAmSet.configHash() + if err != nil { + return err + } + configToAlertmanagers[hash] = oldAmSet + } for k, cfg := range conf.AlertingConfig.AlertmanagerConfigs.ToMap() { ams, err := newAlertmanagerSet(cfg, n.logger, n.metrics) @@ -264,6 +277,16 @@ func (n *Manager) ApplyConfig(conf *config.Config) error { return err } + hash, err := ams.configHash() + if err != nil { + return err + } + + if oldAmSet, ok := configToAlertmanagers[hash]; ok { + ams.ams = oldAmSet.ams + ams.droppedAms = oldAmSet.droppedAms + } + amSets[k] = ams } @@ -319,7 +342,7 @@ func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) { }() wg.Wait() - level.Info(n.logger).Log("msg", "Notification manager stopped") + n.logger.Info("Notification manager stopped") } // sendLoop continuously consumes the notifications queue and sends alerts to @@ -376,20 +399,20 @@ func (n *Manager) sendOneBatch() { func (n *Manager) drainQueue() { if !n.opts.DrainOnShutdown { if n.queueLen() > 0 { - level.Warn(n.logger).Log("msg", "Draining remaining notifications on shutdown is disabled, and some notifications have been dropped", "count", n.queueLen()) + n.logger.Warn("Draining remaining notifications on shutdown is disabled, and some notifications have been dropped", "count", n.queueLen()) n.metrics.dropped.Add(float64(n.queueLen())) } return } - level.Info(n.logger).Log("msg", "Draining any remaining notifications...") + n.logger.Info("Draining any remaining notifications...") for n.queueLen() > 0 { n.sendOneBatch() } - level.Info(n.logger).Log("msg", "Remaining notifications drained") + n.logger.Info("Remaining notifications drained") } func (n *Manager) reload(tgs map[string][]*targetgroup.Group) { @@ -399,7 +422,7 @@ func (n *Manager) reload(tgs map[string][]*targetgroup.Group) { for id, tgroup := range tgs { am, ok := n.alertmanagers[id] if !ok { - level.Error(n.logger).Log("msg", "couldn't sync alert manager set", "err", fmt.Sprintf("invalid id:%v", id)) + n.logger.Error("couldn't sync alert manager set", "err", fmt.Sprintf("invalid id:%v", id)) continue } am.sync(tgroup) @@ -422,7 +445,7 @@ func (n *Manager) Send(alerts ...*Alert) { if d := len(alerts) - n.opts.QueueCapacity; d > 0 { alerts = alerts[d:] - level.Warn(n.logger).Log("msg", "Alert batch larger than queue capacity, dropping alerts", "num_dropped", d) + n.logger.Warn("Alert batch larger than queue capacity, dropping alerts", "num_dropped", d) n.metrics.dropped.Add(float64(d)) } @@ -431,7 +454,7 @@ func (n *Manager) Send(alerts ...*Alert) { if d := (len(n.queue) + len(alerts)) - n.opts.QueueCapacity; d > 0 { n.queue = n.queue[d:] - level.Warn(n.logger).Log("msg", "Alert notification queue full, dropping alerts", "num_dropped", d) + n.logger.Warn("Alert notification queue full, 
dropping alerts", "num_dropped", d) n.metrics.dropped.Add(float64(d)) } n.queue = append(n.queue, alerts...) @@ -519,10 +542,10 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { begin := time.Now() - // v1Payload and v2Payload represent 'alerts' marshaled for Alertmanager API - // v1 or v2. Marshaling happens below. Reference here is for caching between + // cachedPayload represent 'alerts' marshaled for Alertmanager API v2. + // Marshaling happens below. Reference here is for caching between // for loop iterations. - var v1Payload, v2Payload []byte + var cachedPayload []byte n.mtx.RLock() amSets := n.alertmanagers @@ -553,42 +576,29 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { continue } // We can't use the cached values from previous iteration. - v1Payload, v2Payload = nil, nil + cachedPayload = nil } switch ams.cfg.APIVersion { - case config.AlertmanagerAPIVersionV1: - { - if v1Payload == nil { - v1Payload, err = json.Marshal(amAlerts) - if err != nil { - level.Error(n.logger).Log("msg", "Encoding alerts for Alertmanager API v1 failed", "err", err) - ams.mtx.RUnlock() - return false - } - } - - payload = v1Payload - } case config.AlertmanagerAPIVersionV2: { - if v2Payload == nil { + if cachedPayload == nil { openAPIAlerts := alertsToOpenAPIAlerts(amAlerts) - v2Payload, err = json.Marshal(openAPIAlerts) + cachedPayload, err = json.Marshal(openAPIAlerts) if err != nil { - level.Error(n.logger).Log("msg", "Encoding alerts for Alertmanager API v2 failed", "err", err) + n.logger.Error("Encoding alerts for Alertmanager API v2 failed", "err", err) ams.mtx.RUnlock() return false } } - payload = v2Payload + payload = cachedPayload } default: { - level.Error(n.logger).Log( - "msg", fmt.Sprintf("Invalid Alertmanager API version '%v', expected one of '%v'", ams.cfg.APIVersion, config.SupportedAlertmanagerAPIVersions), + n.logger.Error( + fmt.Sprintf("Invalid Alertmanager API version '%v', expected one of '%v'", ams.cfg.APIVersion, config.SupportedAlertmanagerAPIVersions), "err", err, ) ams.mtx.RUnlock() @@ -598,7 +608,7 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { if len(ams.cfg.AlertRelabelConfigs) > 0 { // We can't use the cached values on the next iteration. - v1Payload, v2Payload = nil, nil + cachedPayload = nil } for _, am := range ams.ams { @@ -609,7 +619,7 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { go func(ctx context.Context, client *http.Client, url string, payload []byte, count int) { if err := n.sendOne(ctx, client, url, payload); err != nil { - level.Error(n.logger).Log("alertmanager", url, "count", count, "msg", "Error sending alert", "err", err) + n.logger.Error("Error sending alert", "alertmanager", url, "count", count, "err", err) n.metrics.errors.WithLabelValues(url).Inc() } else { numSuccess.Inc() @@ -689,7 +699,7 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b // // Stop is safe to call multiple times. 
func (n *Manager) Stop() { - level.Info(n.logger).Log("msg", "Stopping notification manager...") + n.logger.Info("Stopping notification manager...") n.stopOnce.Do(func() { close(n.stopRequested) @@ -724,10 +734,10 @@ type alertmanagerSet struct { mtx sync.RWMutex ams []alertmanager droppedAms []alertmanager - logger log.Logger + logger *slog.Logger } -func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger log.Logger, metrics *alertMetrics) (*alertmanagerSet, error) { +func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger *slog.Logger, metrics *alertMetrics) (*alertmanagerSet, error) { client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "alertmanager") if err != nil { return nil, err @@ -761,7 +771,7 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) { for _, tg := range tgs { ams, droppedAms, err := AlertmanagerFromGroup(tg, s.cfg) if err != nil { - level.Error(s.logger).Log("msg", "Creating discovered Alertmanagers failed", "err", err) + s.logger.Error("Creating discovered Alertmanagers failed", "err", err) continue } allAms = append(allAms, ams...) @@ -770,6 +780,7 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) { s.mtx.Lock() defer s.mtx.Unlock() + previousAms := s.ams // Set new Alertmanagers and deduplicate them along their unique URL. s.ams = []alertmanager{} s.droppedAms = []alertmanager{} @@ -789,6 +800,26 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) { seen[us] = struct{}{} s.ams = append(s.ams, am) } + // Now remove counters for any removed Alertmanagers. + for _, am := range previousAms { + us := am.url().String() + if _, ok := seen[us]; ok { + continue + } + s.metrics.latency.DeleteLabelValues(us) + s.metrics.sent.DeleteLabelValues(us) + s.metrics.errors.DeleteLabelValues(us) + seen[us] = struct{}{} + } +} + +func (s *alertmanagerSet) configHash() (string, error) { + b, err := yaml.Marshal(s.cfg) + if err != nil { + return "", err + } + hash := md5.Sum(b) + return hex.EncodeToString(hash[:]), nil } func postPath(pre string, v config.AlertmanagerAPIVersion) string { diff --git a/notifier/notifier_test.go b/notifier/notifier_test.go index cf922a537c0..97b0274f29c 100644 --- a/notifier/notifier_test.go +++ b/notifier/notifier_test.go @@ -26,11 +26,11 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/prometheus/alertmanager/api/v2/models" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/atomic" "gopkg.in/yaml.v2" @@ -38,6 +38,7 @@ import ( "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/config" + _ "github.com/prometheus/prometheus/discovery/file" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" @@ -49,27 +50,27 @@ func TestPostPath(t *testing.T) { }{ { in: "", - out: "/api/v1/alerts", + out: "/api/v2/alerts", }, { in: "/", - out: "/api/v1/alerts", + out: "/api/v2/alerts", }, { in: "/prefix", - out: "/prefix/api/v1/alerts", + out: "/prefix/api/v2/alerts", }, { in: "/prefix//", - out: "/prefix/api/v1/alerts", + out: "/prefix/api/v2/alerts", }, { in: "prefix//", - out: "/prefix/api/v1/alerts", + out: "/prefix/api/v2/alerts", }, } for _, c := range cases { - require.Equal(t, c.out, postPath(c.in, config.AlertmanagerAPIVersionV1)) + require.Equal(t, c.out, postPath(c.in, 
config.AlertmanagerAPIVersionV2)) } } @@ -743,7 +744,7 @@ func TestHangingNotifier(t *testing.T) { // Initialize the discovery manager // This is relevant as the updates aren't sent continually in real life, but only each updatert. - // The old implementation of TestHangingNotifier didn't take that into acount. + // The old implementation of TestHangingNotifier didn't take that into account. ctx, cancel := context.WithCancel(context.Background()) defer cancel() reg := prometheus.NewRegistry() @@ -751,7 +752,7 @@ func TestHangingNotifier(t *testing.T) { require.NoError(t, err) sdManager := discovery.NewManager( ctx, - log.NewNopLogger(), + promslog.NewNopLogger(), reg, sdMetrics, discovery.Name("sd-manager"), @@ -1017,3 +1018,107 @@ func TestStop_DrainingEnabled(t *testing.T) { require.Equal(t, int64(2), alertsReceived.Load()) } + +func TestApplyConfig(t *testing.T) { + targetURL := "alertmanager:9093" + targetGroup := &targetgroup.Group{ + Targets: []model.LabelSet{ + { + "__address__": model.LabelValue(targetURL), + }, + }, + } + alertmanagerURL := fmt.Sprintf("http://%s/api/v2/alerts", targetURL) + + n := NewManager(&Options{}, nil) + cfg := &config.Config{} + s := ` +alerting: + alertmanagers: + - file_sd_configs: + - files: + - foo.json +` + // 1. Ensure known alertmanagers are not dropped during ApplyConfig. + require.NoError(t, yaml.UnmarshalStrict([]byte(s), cfg)) + require.Len(t, cfg.AlertingConfig.AlertmanagerConfigs, 1) + + // First, apply the config and reload. + require.NoError(t, n.ApplyConfig(cfg)) + tgs := map[string][]*targetgroup.Group{"config-0": {targetGroup}} + n.reload(tgs) + require.Len(t, n.Alertmanagers(), 1) + require.Equal(t, alertmanagerURL, n.Alertmanagers()[0].String()) + + // Reapply the config. + require.NoError(t, n.ApplyConfig(cfg)) + // Ensure the known alertmanagers are not dropped. + require.Len(t, n.Alertmanagers(), 1) + require.Equal(t, alertmanagerURL, n.Alertmanagers()[0].String()) + + // 2. Ensure known alertmanagers are not dropped during ApplyConfig even when + // the config order changes. + s = ` +alerting: + alertmanagers: + - static_configs: + - file_sd_configs: + - files: + - foo.json +` + require.NoError(t, yaml.UnmarshalStrict([]byte(s), cfg)) + require.Len(t, cfg.AlertingConfig.AlertmanagerConfigs, 2) + + require.NoError(t, n.ApplyConfig(cfg)) + require.Len(t, n.Alertmanagers(), 1) + // Ensure no unnecessary alertmanagers are injected. + require.Empty(t, n.alertmanagers["config-0"].ams) + // Ensure the config order is taken into account. + ams := n.alertmanagers["config-1"].ams + require.Len(t, ams, 1) + require.Equal(t, alertmanagerURL, ams[0].url().String()) + + // 3. Ensure known alertmanagers are reused for new config with identical AlertmanagerConfig. + s = ` +alerting: + alertmanagers: + - file_sd_configs: + - files: + - foo.json + - file_sd_configs: + - files: + - foo.json +` + require.NoError(t, yaml.UnmarshalStrict([]byte(s), cfg)) + require.Len(t, cfg.AlertingConfig.AlertmanagerConfigs, 2) + + require.NoError(t, n.ApplyConfig(cfg)) + require.Len(t, n.Alertmanagers(), 2) + for cfgIdx := range 2 { + ams := n.alertmanagers[fmt.Sprintf("config-%d", cfgIdx)].ams + require.Len(t, ams, 1) + require.Equal(t, alertmanagerURL, ams[0].url().String()) + } + + // 4. Ensure known alertmanagers are reused only for identical AlertmanagerConfig. 
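[Editor's note, illustration only — not part of the patch] The reuse behaviour exercised by TestApplyConfig relies on the configHash shown earlier in this diff: each AlertmanagerConfig is YAML-marshaled and MD5-hashed, and a new alertmanager set whose hash matches an old one inherits the old set's discovered and dropped Alertmanagers instead of waiting for the next service-discovery update. A self-contained sketch of that hashing idea, using simplified placeholder values rather than the real config structs:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"

	"gopkg.in/yaml.v2"
)

// hashConfig mirrors the patch's configHash: YAML-marshal the config and
// MD5 the bytes so structurally identical configs map to the same key.
func hashConfig(cfg interface{}) (string, error) {
	b, err := yaml.Marshal(cfg)
	if err != nil {
		return "", err
	}
	sum := md5.Sum(b)
	return hex.EncodeToString(sum[:]), nil
}

func main() {
	oldCfg := map[string]interface{}{"file_sd_configs": []map[string][]string{{"files": {"foo.json"}}}}
	newCfg := map[string]interface{}{"file_sd_configs": []map[string][]string{{"files": {"foo.json"}}}}

	h1, _ := hashConfig(oldCfg)
	h2, _ := hashConfig(newCfg)
	// Equal hashes mean the previously discovered Alertmanagers can be
	// carried over to the new set rather than dropped.
	fmt.Println(h1 == h2) // true
}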
+ s = ` +alerting: + alertmanagers: + - file_sd_configs: + - files: + - foo.json + path_prefix: /bar + - file_sd_configs: + - files: + - foo.json + relabel_configs: + - source_labels: ['__address__'] + regex: 'doesntmatter:1234' + action: drop +` + require.NoError(t, yaml.UnmarshalStrict([]byte(s), cfg)) + require.Len(t, cfg.AlertingConfig.AlertmanagerConfigs, 2) + + require.NoError(t, n.ApplyConfig(cfg)) + require.Empty(t, n.Alertmanagers()) +} diff --git a/promql/bench_test.go b/promql/bench_test.go index a7817bd8498..943baceecb5 100644 --- a/promql/bench_test.go +++ b/promql/bench_test.go @@ -119,7 +119,7 @@ func rangeQueryCases() []benchCase { }, // Holt-Winters and long ranges. { - expr: "holt_winters(a_X[1d], 0.3, 0.3)", + expr: "double_exponential_smoothing(a_X[1d], 0.3, 0.3)", }, { expr: "changes(a_X[1d])", diff --git a/promql/engine.go b/promql/engine.go index 6cc4c69cf17..eecb4bcc4e7 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" "io" + "log/slog" "math" "reflect" "runtime" @@ -30,10 +31,9 @@ import ( "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -43,6 +43,7 @@ import ( "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/parser/posrange" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/util/annotations" @@ -125,7 +126,8 @@ type QueryEngine interface { // QueryLogger is an interface that can be used to log all the queries logged // by the engine. type QueryLogger interface { - Log(...interface{}) error + Log(context.Context, slog.Level, string, ...any) + With(args ...any) Close() error } @@ -288,7 +290,7 @@ type QueryTracker interface { // EngineOpts contains configuration options used when creating a new Engine. type EngineOpts struct { - Logger log.Logger + Logger *slog.Logger Reg prometheus.Registerer MaxSamples int Timeout time.Duration @@ -326,7 +328,7 @@ type EngineOpts struct { // Engine handles the lifetime of queries from beginning to end. // It is connected to a querier. type Engine struct { - logger log.Logger + logger *slog.Logger metrics *engineMetrics timeout time.Duration maxSamplesPerQuery int @@ -344,7 +346,7 @@ type Engine struct { // NewEngine returns a new engine. func NewEngine(opts EngineOpts) *Engine { if opts.Logger == nil { - opts.Logger = log.NewNopLogger() + opts.Logger = promslog.NewNopLogger() } queryResultSummary := prometheus.NewSummaryVec(prometheus.SummaryOpts{ @@ -403,7 +405,7 @@ func NewEngine(opts EngineOpts) *Engine { if opts.LookbackDelta == 0 { opts.LookbackDelta = defaultLookbackDelta if l := opts.Logger; l != nil { - level.Debug(l).Log("msg", "Lookback delta is zero, setting to default value", "value", defaultLookbackDelta) + l.Debug("Lookback delta is zero, setting to default value", "value", defaultLookbackDelta) } } @@ -455,7 +457,7 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) { // not make reload fail; only log a warning. 
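[Editor's note, illustration only — not part of the patch] With the engine moved from go-kit logging to log/slog, the QueryLogger interface above now expects Log(context.Context, slog.Level, string, ...any) plus With and Close instead of go-kit's variadic Log. Prometheus's real query logger is file-backed and more involved; purely as a sketch, a type satisfying the reshaped interface by delegating to a *slog.Logger could look like this (slogQueryLogger is a hypothetical name):

package main

import (
	"context"
	"log/slog"
	"os"
)

// slogQueryLogger is a minimal adapter that satisfies the reshaped
// QueryLogger interface by forwarding every call to a *slog.Logger.
type slogQueryLogger struct {
	l *slog.Logger
}

func (q *slogQueryLogger) Log(ctx context.Context, level slog.Level, msg string, args ...any) {
	q.l.Log(ctx, level, msg, args...)
}

func (q *slogQueryLogger) With(args ...any) {
	q.l = q.l.With(args...)
}

func (q *slogQueryLogger) Close() error { return nil }

func main() {
	ql := &slogQueryLogger{l: slog.New(slog.NewJSONHandler(os.Stderr, nil))}
	ql.With("component", "query-logger")
	ql.Log(context.Background(), slog.LevelInfo, "promql query logged", "query", "up")
}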
err := ng.queryLogger.Close() if err != nil { - level.Warn(ng.logger).Log("msg", "Error while closing the previous query log file", "err", err) + ng.logger.Warn("Error while closing the previous query log file", "err", err) } } @@ -645,10 +647,10 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota f = append(f, k, v) } } - if err := l.Log(f...); err != nil { - ng.metrics.queryLogFailures.Inc() - level.Error(ng.logger).Log("msg", "can't log query", "err", err) - } + l.Log(context.Background(), slog.LevelInfo, "promql query logged", f...) + // TODO: @tjhop -- do we still need this metric/error log if logger doesn't return errors? + // ng.metrics.queryLogFailures.Inc() + // ng.logger.Error("can't log query", "err", err) } ng.queryLoggerLock.RUnlock() }() @@ -911,11 +913,17 @@ func getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorSelector, path } if evalRange == 0 { - start -= durationMilliseconds(s.LookbackDelta) + // Reduce the start by one fewer ms than the lookback delta + // because we want to exclude samples that are precisely the + // lookback delta before the eval time. + start -= durationMilliseconds(s.LookbackDelta) - 1 } else { - // For all matrix queries we want to ensure that we have (end-start) + range selected - // this way we have `range` data before the start time - start -= durationMilliseconds(evalRange) + // For all matrix queries we want to ensure that we have + // (end-start) + range selected this way we have `range` data + // before the start time. We subtract one from the range to + // exclude samples positioned directly at the lower boundary of + // the range. + start -= durationMilliseconds(evalRange) - 1 } offsetMilliseconds := durationMilliseconds(n.OriginalOffset) @@ -1055,7 +1063,7 @@ type evaluator struct { maxSamples int currentSamples int - logger log.Logger + logger *slog.Logger lookbackDelta time.Duration samplesStats *stats.QuerySamples noStepSubqueryIntervalFn func(rangeMillis int64) int64 @@ -1086,7 +1094,7 @@ func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp buf := make([]byte, 64<<10) buf = buf[:runtime.Stack(buf, false)] - level.Error(ev.logger).Log("msg", "runtime panic during query evaluation", "expr", expr.String(), "err", e, "stacktrace", string(buf)) + ev.logger.Error("runtime panic during query evaluation", "expr", expr.String(), "err", e, "stacktrace", string(buf)) *errp = fmt.Errorf("unexpected error: %w", err) case errWithWarnings: *errp = err.err @@ -1513,7 +1521,7 @@ func (ev *evaluator) evalSubquery(ctx context.Context, subq *parser.SubqueryExpr // Avoid double counting samples when running a subquery, those samples will be counted in later stage. ev.samplesStats = ev.samplesStats.NewChild() val, ws := ev.eval(ctx, subq) - // But do incorporate the peak from the subquery + // But do incorporate the peak from the subquery.
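[Editor's note, worked example — not part of the patch] The getTimeRangesForSelector hunks above change instant-vector selection from a closed to a half-open lookback window: at evaluation time t with lookback delta d, samples are taken from (t-d, t] instead of [t-d, t], so a sample sitting exactly d before t is no longer selected. Subtracting durationMilliseconds(d) - 1 from the start achieves that; a tiny sketch of the arithmetic (durationMilliseconds simplified):

package main

import (
	"fmt"
	"time"
)

// durationMilliseconds is a simplified stand-in for the engine's helper.
func durationMilliseconds(d time.Duration) int64 { return int64(d / time.Millisecond) }

func main() {
	evalTime := int64(600000)   // t = 10m, in milliseconds
	lookback := 5 * time.Minute // d

	oldStart := evalTime - durationMilliseconds(lookback)       // 300000: closed interval [t-d, t]
	newStart := evalTime - (durationMilliseconds(lookback) - 1) // 300001: half-open interval (t-d, t]

	// A sample stamped exactly at t-d (300000) was selected before this
	// change and is excluded now; anything strictly newer than t-d still is.
	fmt.Println(oldStart, newStart)
}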
samplesStats.UpdatePeakFromSubquery(ev.samplesStats) ev.samplesStats = samplesStats mat := val.(Matrix) @@ -1795,9 +1803,8 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, ev.samplesStats.UpdatePeak(ev.currentSamples) if e.Func.Name == "rate" || e.Func.Name == "increase" { - samples := inMatrix[0] - metricName := samples.Metric.Get(labels.MetricName) - if metricName != "" && len(samples.Floats) > 0 && + metricName := inMatrix[0].Metric.Get(labels.MetricName) + if metricName != "" && len(ss.Floats) > 0 && !strings.HasSuffix(metricName, "_total") && !strings.HasSuffix(metricName, "_sum") && !strings.HasSuffix(metricName, "_count") && @@ -1913,20 +1920,20 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, }, e.LHS, e.RHS) default: return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec, err := ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh) + vec, err := ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh, e.PositionRange()) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar: return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec, err := ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh) + vec, err := ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh, e.PositionRange()) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector: return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec, err := ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh) + vec, err := ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh, e.PositionRange()) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } @@ -1979,7 +1986,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, // Start with the first timestamp after (ev.startTimestamp - offset - range) // that is aligned with the step (multiple of 'newEv.interval'). 
newEv.startTimestamp = newEv.interval * ((ev.startTimestamp - offsetMillis - rangeMillis) / newEv.interval) - if newEv.startTimestamp < (ev.startTimestamp - offsetMillis - rangeMillis) { + if newEv.startTimestamp <= (ev.startTimestamp - offsetMillis - rangeMillis) { newEv.startTimestamp += newEv.interval } @@ -2099,7 +2106,7 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Co seriesIterators := make([]*storage.MemoizedSeriesIterator, len(vs.Series)) for i, s := range vs.Series { it := s.Iterator(nil) - seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)) + seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)-1) } return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { @@ -2161,7 +2168,7 @@ func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, of if valueType == chunkenc.ValNone || t > refTime { var ok bool t, v, h, ok = it.PeekPrev() - if !ok || t < refTime-durationMilliseconds(ev.lookbackDelta) { + if !ok || t <= refTime-durationMilliseconds(ev.lookbackDelta) { return 0, 0, nil, false } } @@ -2295,20 +2302,20 @@ func (ev *evaluator) matrixIterSlice( mintFloats, mintHistograms := mint, mint // First floats... - if len(floats) > 0 && floats[len(floats)-1].T >= mint { + if len(floats) > 0 && floats[len(floats)-1].T > mint { // There is an overlap between previous and current ranges, retain common // points. In most such cases: // (a) the overlap is significantly larger than the eval step; and/or // (b) the number of samples is relatively small. // so a linear search will be as fast as a binary search. var drop int - for drop = 0; floats[drop].T < mint; drop++ { + for drop = 0; floats[drop].T <= mint; drop++ { } ev.currentSamples -= drop copy(floats, floats[drop:]) floats = floats[:len(floats)-drop] // Only append points with timestamps after the last timestamp we have. - mintFloats = floats[len(floats)-1].T + 1 + mintFloats = floats[len(floats)-1].T } else { ev.currentSamples -= len(floats) if floats != nil { @@ -2317,14 +2324,14 @@ func (ev *evaluator) matrixIterSlice( } // ...then the same for histograms. TODO(beorn7): Use generics? - if len(histograms) > 0 && histograms[len(histograms)-1].T >= mint { + if len(histograms) > 0 && histograms[len(histograms)-1].T > mint { // There is an overlap between previous and current ranges, retain common // points. In most such cases: // (a) the overlap is significantly larger than the eval step; and/or // (b) the number of samples is relatively small. // so a linear search will be as fast as a binary search. var drop int - for drop = 0; histograms[drop].T < mint; drop++ { + for drop = 0; histograms[drop].T <= mint; drop++ { } // Rotate the buffer around the drop index so that points before mint can be // reused to store new histograms. @@ -2335,7 +2342,7 @@ func (ev *evaluator) matrixIterSlice( histograms = histograms[:len(histograms)-drop] ev.currentSamples -= totalHPointSize(histograms) // Only append points with timestamps after the last timestamp we have. - mintHistograms = histograms[len(histograms)-1].T + 1 + mintHistograms = histograms[len(histograms)-1].T } else { ev.currentSamples -= totalHPointSize(histograms) if histograms != nil { @@ -2359,7 +2366,7 @@ loop: case chunkenc.ValFloatHistogram, chunkenc.ValHistogram: t := buf.AtT() // Values in the buffer are guaranteed to be smaller than maxt. 
- if t >= mintHistograms { + if t > mintHistograms { if histograms == nil { histograms = getMatrixSelectorHPoints() } @@ -2385,7 +2392,7 @@ loop: continue loop } // Values in the buffer are guaranteed to be smaller than maxt. - if t >= mintFloats { + if t > mintFloats { ev.currentSamples++ if ev.currentSamples > ev.maxSamples { ev.error(ErrTooManySamples(env)) @@ -2520,7 +2527,7 @@ func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatchi } // VectorBinop evaluates a binary operation between two Vectors, excluding set operators. -func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) (Vector, error) { +func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper, pos posrange.PositionRange) (Vector, error) { if matching.Card == parser.CardManyToMany { panic("many-to-many only allowed for set operators") } @@ -2594,7 +2601,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * fl, fr = fr, fl hl, hr = hr, hl } - floatValue, histogramValue, keep, err := vectorElemBinop(op, fl, fr, hl, hr) + floatValue, histogramValue, keep, err := vectorElemBinop(op, fl, fr, hl, hr, pos) if err != nil { lastErr = err } @@ -2703,7 +2710,7 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V } // VectorscalarBinop evaluates a binary operation between a Vector and a Scalar. -func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) (Vector, error) { +func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper, pos posrange.PositionRange) (Vector, error) { var lastErr error for _, lhsSample := range lhs { lf, rf := lhsSample.F, rhs.V @@ -2715,7 +2722,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala lf, rf = rf, lf lh, rh = rh, lh } - float, histogram, keep, err := vectorElemBinop(op, lf, rf, lh, rh) + float, histogram, keep, err := vectorElemBinop(op, lf, rf, lh, rh, pos) if err != nil { lastErr = err } @@ -2782,7 +2789,7 @@ func scalarBinop(op parser.ItemType, lhs, rhs float64) float64 { } // vectorElemBinop evaluates a binary operation between two Vector elements. 
-func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { +func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram, pos posrange.PositionRange) (float64, *histogram.FloatHistogram, bool, error) { switch op { case parser.ADD: if hlhs != nil && hrhs != nil { @@ -2792,7 +2799,13 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram } return 0, res.Compact(0), true, nil } - return lhs + rhs, nil, true, nil + if hlhs == nil && hrhs == nil { + return lhs + rhs, nil, true, nil + } + if hlhs != nil { + return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", "+", "float", pos) + } + return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("float", "+", "histogram", pos) case parser.SUB: if hlhs != nil && hrhs != nil { res, err := hlhs.Copy().Sub(hrhs) @@ -2801,7 +2814,13 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram } return 0, res.Compact(0), true, nil } - return lhs - rhs, nil, true, nil + if hlhs == nil && hrhs == nil { + return lhs - rhs, nil, true, nil + } + if hlhs != nil { + return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", "-", "float", pos) + } + return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("float", "-", "histogram", pos) case parser.MUL: if hlhs != nil && hrhs == nil { return 0, hlhs.Copy().Mul(rhs), true, nil @@ -2809,11 +2828,20 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram if hlhs == nil && hrhs != nil { return 0, hrhs.Copy().Mul(lhs), true, nil } + if hlhs != nil && hrhs != nil { + return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", "*", "histogram", pos) + } return lhs * rhs, nil, true, nil case parser.DIV: if hlhs != nil && hrhs == nil { return 0, hlhs.Copy().Div(rhs), true, nil } + if hrhs != nil { + if hlhs != nil { + return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", "/", "histogram", pos) + } + return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("float", "/", "histogram", pos) + } return lhs / rhs, nil, true, nil case parser.POW: return math.Pow(lhs, rhs), nil, true, nil @@ -2890,7 +2918,15 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix group.hasHistogram = true } case parser.STDVAR, parser.STDDEV: - group.floatValue = 0 + switch { + case h != nil: + // Ignore histograms for STDVAR and STDDEV. 
+ group.seen = false + case math.IsNaN(f), math.IsInf(f, 0): + group.floatValue = math.NaN() + default: + group.floatValue = 0 + } case parser.QUANTILE: group.heap = make(vectorByValueHeap, 1) group.heap[0] = Sample{F: f} @@ -3351,6 +3387,9 @@ func handleVectorBinopError(err error, e *parser.BinaryExpr) annotations.Annotat } metricName := "" pos := e.PositionRange() + if errors.Is(err, annotations.PromQLInfo) || errors.Is(err, annotations.PromQLWarning) { + return annotations.New().Add(err) + } if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) { return annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { diff --git a/promql/engine_internal_test.go b/promql/engine_internal_test.go index cb501b2fdf0..0962c218c7f 100644 --- a/promql/engine_internal_test.go +++ b/promql/engine_internal_test.go @@ -14,22 +14,21 @@ package promql import ( + "bytes" "errors" "testing" - "github.com/go-kit/log" "github.com/stretchr/testify/require" + "github.com/prometheus/common/promslog" + "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/util/annotations" ) func TestRecoverEvaluatorRuntime(t *testing.T) { - var output []interface{} - logger := log.Logger(log.LoggerFunc(func(keyvals ...interface{}) error { - output = append(output, keyvals...) - return nil - })) + var output bytes.Buffer + logger := promslog.New(&promslog.Config{Writer: &output}) ev := &evaluator{logger: logger} expr, _ := parser.ParseExpr("sum(up)") @@ -38,7 +37,7 @@ func TestRecoverEvaluatorRuntime(t *testing.T) { defer func() { require.EqualError(t, err, "unexpected error: runtime error: index out of range [123] with length 0") - require.Contains(t, output, "sum(up)") + require.Contains(t, output.String(), "sum(up)") }() defer ev.recover(expr, nil, &err) @@ -48,7 +47,7 @@ func TestRecoverEvaluatorRuntime(t *testing.T) { } func TestRecoverEvaluatorError(t *testing.T) { - ev := &evaluator{logger: log.NewNopLogger()} + ev := &evaluator{logger: promslog.NewNopLogger()} var err error e := errors.New("custom error") @@ -62,7 +61,7 @@ func TestRecoverEvaluatorError(t *testing.T) { } func TestRecoverEvaluatorErrorWithWarnings(t *testing.T) { - ev := &evaluator{logger: log.NewNopLogger()} + ev := &evaluator{logger: promslog.NewNopLogger()} var err error var ws annotations.Annotations diff --git a/promql/engine_test.go b/promql/engine_test.go index 8da27a73c5c..59d8503e8e0 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -17,8 +17,11 @@ import ( "context" "errors" "fmt" + "log/slog" + "math" "sort" "strconv" + "strings" "sync" "testing" "time" @@ -28,11 +31,13 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser/posrange" "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/stats" "github.com/prometheus/prometheus/util/teststorage" @@ -321,271 +326,271 @@ func TestSelectHintsSetCorrectly(t *testing.T) { { query: "foo", start: 10000, expected: []*storage.SelectHints{ - {Start: 5000, End: 10000}, + {Start: 5001, 
End: 10000}, }, }, { query: "foo @ 15", start: 10000, expected: []*storage.SelectHints{ - {Start: 10000, End: 15000}, + {Start: 10001, End: 15000}, }, }, { query: "foo @ 1", start: 10000, expected: []*storage.SelectHints{ - {Start: -4000, End: 1000}, + {Start: -3999, End: 1000}, }, }, { query: "foo[2m]", start: 200000, expected: []*storage.SelectHints{ - {Start: 80000, End: 200000, Range: 120000}, + {Start: 80001, End: 200000, Range: 120000}, }, }, { query: "foo[2m] @ 180", start: 200000, expected: []*storage.SelectHints{ - {Start: 60000, End: 180000, Range: 120000}, + {Start: 60001, End: 180000, Range: 120000}, }, }, { query: "foo[2m] @ 300", start: 200000, expected: []*storage.SelectHints{ - {Start: 180000, End: 300000, Range: 120000}, + {Start: 180001, End: 300000, Range: 120000}, }, }, { query: "foo[2m] @ 60", start: 200000, expected: []*storage.SelectHints{ - {Start: -60000, End: 60000, Range: 120000}, + {Start: -59999, End: 60000, Range: 120000}, }, }, { query: "foo[2m] offset 2m", start: 300000, expected: []*storage.SelectHints{ - {Start: 60000, End: 180000, Range: 120000}, + {Start: 60001, End: 180000, Range: 120000}, }, }, { query: "foo[2m] @ 200 offset 2m", start: 300000, expected: []*storage.SelectHints{ - {Start: -40000, End: 80000, Range: 120000}, + {Start: -39999, End: 80000, Range: 120000}, }, }, { query: "foo[2m:1s]", start: 300000, expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Step: 1000}, + {Start: 175001, End: 300000, Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s])", start: 300000, expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Func: "count_over_time", Step: 1000}, + {Start: 175001, End: 300000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Func: "count_over_time", Step: 1000}, + {Start: 175001, End: 300000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, expected: []*storage.SelectHints{ - {Start: 75000, End: 200000, Func: "count_over_time", Step: 1000}, + {Start: 75001, End: 200000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, expected: []*storage.SelectHints{ - {Start: -25000, End: 100000, Func: "count_over_time", Step: 1000}, + {Start: -24999, End: 100000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000, expected: []*storage.SelectHints{ - {Start: 165000, End: 290000, Func: "count_over_time", Step: 1000}, + {Start: 165001, End: 290000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, expected: []*storage.SelectHints{ - {Start: 155000, End: 280000, Func: "count_over_time", Step: 1000}, + {Start: 155001, End: 280000, Func: "count_over_time", Step: 1000}, }, }, { // When the @ is on the vector selector, the enclosing subquery parameters // don't affect the hint ranges. query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, + {Start: 185001, End: 190000, Func: "count_over_time", Step: 1000}, }, }, { // When the @ is on the vector selector, the enclosing subquery parameters // don't affect the hint ranges. 
query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, + {Start: 185001, End: 190000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, expected: []*storage.SelectHints{ - {Start: -45000, End: 80000, Func: "count_over_time", Step: 1000}, + {Start: -44999, End: 80000, Func: "count_over_time", Step: 1000}, }, }, { query: "foo", start: 10000, end: 20000, expected: []*storage.SelectHints{ - {Start: 5000, End: 20000, Step: 1000}, + {Start: 5001, End: 20000, Step: 1000}, }, }, { query: "foo @ 15", start: 10000, end: 20000, expected: []*storage.SelectHints{ - {Start: 10000, End: 15000, Step: 1000}, + {Start: 10001, End: 15000, Step: 1000}, }, }, { query: "foo @ 1", start: 10000, end: 20000, expected: []*storage.SelectHints{ - {Start: -4000, End: 1000, Step: 1000}, + {Start: -3999, End: 1000, Step: 1000}, }, }, { query: "rate(foo[2m] @ 180)", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: 60000, End: 180000, Range: 120000, Func: "rate", Step: 1000}, + {Start: 60001, End: 180000, Range: 120000, Func: "rate", Step: 1000}, }, }, { query: "rate(foo[2m] @ 300)", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: 180000, End: 300000, Range: 120000, Func: "rate", Step: 1000}, + {Start: 180001, End: 300000, Range: 120000, Func: "rate", Step: 1000}, }, }, { query: "rate(foo[2m] @ 60)", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: -60000, End: 60000, Range: 120000, Func: "rate", Step: 1000}, + {Start: -59999, End: 60000, Range: 120000, Func: "rate", Step: 1000}, }, }, { query: "rate(foo[2m])", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: 80000, End: 500000, Range: 120000, Func: "rate", Step: 1000}, + {Start: 80001, End: 500000, Range: 120000, Func: "rate", Step: 1000}, }, }, { query: "rate(foo[2m] offset 2m)", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 60000, End: 380000, Range: 120000, Func: "rate", Step: 1000}, + {Start: 60001, End: 380000, Range: 120000, Func: "rate", Step: 1000}, }, }, { query: "rate(foo[2m:1s])", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 175000, End: 500000, Func: "rate", Step: 1000}, + {Start: 175001, End: 500000, Func: "rate", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s])", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 175000, End: 500000, Func: "count_over_time", Step: 1000}, + {Start: 175001, End: 500000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 165000, End: 490000, Func: "count_over_time", Step: 1000}, + {Start: 165001, End: 490000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Func: "count_over_time", Step: 1000}, + {Start: 175001, End: 300000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: 75000, End: 200000, Func: "count_over_time", Step: 1000}, + {Start: 75001, End: 200000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, end: 500000, expected: 
[]*storage.SelectHints{ - {Start: -25000, End: 100000, Func: "count_over_time", Step: 1000}, + {Start: -24999, End: 100000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 155000, End: 480000, Func: "count_over_time", Step: 1000}, + {Start: 155001, End: 480000, Func: "count_over_time", Step: 1000}, }, }, { // When the @ is on the vector selector, the enclosing subquery parameters // don't affect the hint ranges. query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, + {Start: 185001, End: 190000, Func: "count_over_time", Step: 1000}, }, }, { // When the @ is on the vector selector, the enclosing subquery parameters // don't affect the hint ranges. query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, + {Start: 185001, End: 190000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: -45000, End: 80000, Func: "count_over_time", Step: 1000}, + {Start: -44999, End: 80000, Func: "count_over_time", Step: 1000}, }, }, { query: "sum by (dim1) (foo)", start: 10000, expected: []*storage.SelectHints{ - {Start: 5000, End: 10000, Func: "sum", By: true, Grouping: []string{"dim1"}}, + {Start: 5001, End: 10000, Func: "sum", By: true, Grouping: []string{"dim1"}}, }, }, { query: "sum without (dim1) (foo)", start: 10000, expected: []*storage.SelectHints{ - {Start: 5000, End: 10000, Func: "sum", Grouping: []string{"dim1"}}, + {Start: 5001, End: 10000, Func: "sum", Grouping: []string{"dim1"}}, }, }, { query: "sum by (dim1) (avg_over_time(foo[1s]))", start: 10000, expected: []*storage.SelectHints{ - {Start: 9000, End: 10000, Func: "avg_over_time", Range: 1000}, + {Start: 9001, End: 10000, Func: "avg_over_time", Range: 1000}, }, }, { query: "sum by (dim1) (max by (dim2) (foo))", start: 10000, expected: []*storage.SelectHints{ - {Start: 5000, End: 10000, Func: "max", By: true, Grouping: []string{"dim2"}}, + {Start: 5001, End: 10000, Func: "max", By: true, Grouping: []string{"dim2"}}, }, }, { query: "(max by (dim1) (foo))[5s:1s]", start: 10000, expected: []*storage.SelectHints{ - {Start: 0, End: 10000, Func: "max", By: true, Grouping: []string{"dim1"}, Step: 1000}, + {Start: 1, End: 10000, Func: "max", By: true, Grouping: []string{"dim1"}, Step: 1000}, }, }, { query: "(sum(http_requests{group=~\"p.*\"})+max(http_requests{group=~\"c.*\"}))[20s:5s]", start: 120000, expected: []*storage.SelectHints{ - {Start: 95000, End: 120000, Func: "sum", By: true, Step: 5000}, - {Start: 95000, End: 120000, Func: "max", By: true, Step: 5000}, + {Start: 95001, End: 120000, Func: "sum", By: true, Step: 5000}, + {Start: 95001, End: 120000, Func: "max", By: true, Step: 5000}, }, }, { query: "foo @ 50 + bar @ 250 + baz @ 900", start: 100000, end: 500000, expected: []*storage.SelectHints{ - {Start: 45000, End: 50000, Step: 1000}, - {Start: 245000, End: 250000, Step: 1000}, - {Start: 895000, End: 900000, Step: 1000}, + {Start: 45001, End: 50000, Step: 1000}, + {Start: 245001, End: 250000, Step: 1000}, + {Start: 895001, End: 900000, Step: 1000}, }, }, { query: "foo @ 50 + bar + baz @ 
900", start: 100000, end: 500000, expected: []*storage.SelectHints{ - {Start: 45000, End: 50000, Step: 1000}, - {Start: 95000, End: 500000, Step: 1000}, - {Start: 895000, End: 900000, Step: 1000}, + {Start: 45001, End: 50000, Step: 1000}, + {Start: 95001, End: 500000, Step: 1000}, + {Start: 895001, End: 900000, Step: 1000}, }, }, { query: "rate(foo[2s] @ 50) + bar @ 250 + baz @ 900", start: 100000, end: 500000, expected: []*storage.SelectHints{ - {Start: 48000, End: 50000, Step: 1000, Func: "rate", Range: 2000}, - {Start: 245000, End: 250000, Step: 1000}, - {Start: 895000, End: 900000, Step: 1000}, + {Start: 48001, End: 50000, Step: 1000, Func: "rate", Range: 2000}, + {Start: 245001, End: 250000, Step: 1000}, + {Start: 895001, End: 900000, Step: 1000}, }, }, { query: "rate(foo[2s:1s] @ 50) + bar + baz", start: 100000, end: 500000, expected: []*storage.SelectHints{ - {Start: 43000, End: 50000, Step: 1000, Func: "rate"}, - {Start: 95000, End: 500000, Step: 1000}, - {Start: 95000, End: 500000, Step: 1000}, + {Start: 43001, End: 50000, Step: 1000, Func: "rate"}, + {Start: 95001, End: 500000, Step: 1000}, + {Start: 95001, End: 500000, Step: 1000}, }, }, { query: "rate(foo[2s:1s] @ 50) + bar + rate(baz[2m:1s] @ 900 offset 2m) ", start: 100000, end: 500000, expected: []*storage.SelectHints{ - {Start: 43000, End: 50000, Step: 1000, Func: "rate"}, - {Start: 95000, End: 500000, Step: 1000}, - {Start: 655000, End: 780000, Step: 1000, Func: "rate"}, + {Start: 43001, End: 50000, Step: 1000, Func: "rate"}, + {Start: 95001, End: 500000, Step: 1000}, + {Start: 655001, End: 780000, Step: 1000, Func: "rate"}, }, }, { // Hints are based on the inner most subquery timestamp. query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 50)[3s:1s] @ 3000`, start: 100000, expected: []*storage.SelectHints{ - {Start: -150000, End: 50000, Range: 100000, Func: "sum_over_time", Step: 25000}, + {Start: -149999, End: 50000, Range: 100000, Func: "sum_over_time", Step: 25000}, }, }, { // Hints are based on the inner most subquery timestamp. query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 3000)[3s:1s] @ 50`, expected: []*storage.SelectHints{ - {Start: 2800000, End: 3000000, Range: 100000, Func: "sum_over_time", Step: 25000}, + {Start: 2800001, End: 3000000, Range: 100000, Func: "sum_over_time", Step: 25000}, }, }, } { @@ -935,22 +940,20 @@ load 10s }, }, { - Query: "max_over_time(metricWith1SampleEvery10Seconds[59s])[20s:5s]", + Query: "max_over_time(metricWith1SampleEvery10Seconds[60s])[20s:5s]", Start: time.Unix(201, 0), PeakSamples: 10, - TotalSamples: 24, // (1 sample / 10 seconds * 60 seconds) * 20/5 (using 59s so we always return 6 samples - // as if we run a query on 00 looking back 60 seconds we will return 7 samples; - // see next test). + TotalSamples: 24, // (1 sample / 10 seconds * 60 seconds) * 4 TotalSamplesPerStep: stats.TotalSamplesPerStep{ 201000: 24, }, }, { - Query: "max_over_time(metricWith1SampleEvery10Seconds[60s])[20s:5s]", + Query: "max_over_time(metricWith1SampleEvery10Seconds[61s])[20s:5s]", Start: time.Unix(201, 0), PeakSamples: 11, TotalSamples: 26, // (1 sample / 10 seconds * 60 seconds) * 4 + 2 as - // max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples. + // max_over_time(metricWith1SampleEvery10Seconds[61s]) @ 190 and 200 will return 7 samples. 
TotalSamplesPerStep: stats.TotalSamplesPerStep{ 201000: 26, }, @@ -959,10 +962,9 @@ load 10s Query: "max_over_time(metricWith1HistogramEvery10Seconds[60s])[20s:5s]", Start: time.Unix(201, 0), PeakSamples: 78, - TotalSamples: 338, // (1 histogram (size 13 HPoint) / 10 seconds * 60 seconds) * 4 + 2 * 13 as - // max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples. + TotalSamples: 312, // (1 histogram (size 13) / 10 seconds * 60 seconds) * 4 TotalSamplesPerStep: stats.TotalSamplesPerStep{ - 201000: 338, + 201000: 312, }, }, { @@ -1427,23 +1429,23 @@ load 10s }, { // The peak samples in memory is during the first evaluation: - // - Subquery takes 22 samples, 11 for each bigmetric, - // - Result is calculated per series where the series samples is buffered, hence 11 more here. + // - Subquery takes 20 samples, 10 for each bigmetric. + // - Result is calculated per series where the series samples is buffered, hence 10 more here. // - The result of two series is added before the last series buffer is discarded, so 2 more here. - // Hence at peak it is 22 (subquery) + 11 (buffer of a series) + 2 (result from 2 series). + // Hence at peak it is 20 (subquery) + 10 (buffer of a series) + 2 (result from 2 series). // The subquery samples and the buffer is discarded before duplicating. Query: `rate(bigmetric[10s:1s] @ 10)`, - MaxSamples: 35, + MaxSamples: 32, Start: time.Unix(0, 0), End: time.Unix(10, 0), Interval: 5 * time.Second, }, { // Here the reasoning is same as above. But LHS and RHS are done one after another. - // So while one of them takes 35 samples at peak, we need to hold the 2 sample + // So while one of them takes 32 samples at peak, we need to hold the 2 sample // result of the other till then. Query: `rate(bigmetric[10s:1s] @ 10) + rate(bigmetric[10s:1s] @ 30)`, - MaxSamples: 37, + MaxSamples: 34, Start: time.Unix(0, 0), End: time.Unix(10, 0), Interval: 5 * time.Second, @@ -1451,28 +1453,28 @@ load 10s { // promql.Sample as above but with only 1 part as step invariant. // Here the peak is caused by the non-step invariant part as it touches more time range. - // Hence at peak it is 2*21 (subquery from 0s to 20s) - // + 11 (buffer of a series per evaluation) + // Hence at peak it is 2*20 (subquery from 0s to 20s) + // + 10 (buffer of a series per evaluation) // + 6 (result from 2 series at 3 eval times). Query: `rate(bigmetric[10s:1s]) + rate(bigmetric[10s:1s] @ 30)`, - MaxSamples: 59, + MaxSamples: 56, Start: time.Unix(10, 0), End: time.Unix(20, 0), Interval: 5 * time.Second, }, { // Nested subquery. - // We saw that innermost rate takes 35 samples which is still the peak + // We saw that innermost rate takes 32 samples which is still the peak // since the other two subqueries just duplicate the result. - Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[100s:20s] @ 2000`, - MaxSamples: 35, + Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[100s:20s] @ 2000`, + MaxSamples: 32, Start: time.Unix(10, 0), }, { // Nested subquery. - // Now the outmost subquery produces more samples than inner most rate. + // Now the outermost subquery produces more samples than innermost rate.
Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[17s:1s] @ 2000`, - MaxSamples: 36, + MaxSamples: 34, Start: time.Unix(10, 0), }, } @@ -1579,11 +1581,11 @@ load 1ms start: 10, result: promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 28, T: 280000}, {F: 29, T: 290000}, {F: 30, T: 300000}}, + Floats: []promql.FPoint{{F: 29, T: 290000}, {F: 30, T: 300000}}, Metric: lbls1, }, promql.Series{ - Floats: []promql.FPoint{{F: 56, T: 280000}, {F: 58, T: 290000}, {F: 60, T: 300000}}, + Floats: []promql.FPoint{{F: 58, T: 290000}, {F: 60, T: 300000}}, Metric: lbls2, }, }, @@ -1592,7 +1594,7 @@ load 1ms start: 100, result: promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 3, T: -2000}, {F: 2, T: -1000}, {F: 1, T: 0}}, + Floats: []promql.FPoint{{F: 2, T: -1000}, {F: 1, T: 0}}, Metric: lblsneg, }, }, @@ -1601,7 +1603,7 @@ load 1ms start: 100, result: promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 504, T: -503000}, {F: 503, T: -502000}, {F: 502, T: -501000}, {F: 501, T: -500000}}, + Floats: []promql.FPoint{{F: 503, T: -502000}, {F: 502, T: -501000}, {F: 501, T: -500000}}, Metric: lblsneg, }, }, @@ -1610,13 +1612,26 @@ load 1ms start: 100, result: promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 2342, T: 2342}, {F: 2343, T: 2343}, {F: 2344, T: 2344}, {F: 2345, T: 2345}}, + Floats: []promql.FPoint{{F: 2343, T: 2343}, {F: 2344, T: 2344}, {F: 2345, T: 2345}}, Metric: lblsms, }, }, }, { query: "metric[100s:25s] @ 300", start: 100, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 22, T: 225000}, {F: 25, T: 250000}, {F: 27, T: 275000}, {F: 30, T: 300000}}, + Metric: lbls1, + }, + promql.Series{ + Floats: []promql.FPoint{{F: 44, T: 225000}, {F: 50, T: 250000}, {F: 54, T: 275000}, {F: 60, T: 300000}}, + Metric: lbls2, + }, + }, + }, { + query: "metric[100s1ms:25s] @ 300", // Add 1ms to the range to see the legacy behavior of the previous test. + start: 100, result: promql.Matrix{ promql.Series{ Floats: []promql.FPoint{{F: 20, T: 200000}, {F: 22, T: 225000}, {F: 25, T: 250000}, {F: 27, T: 275000}, {F: 30, T: 300000}}, @@ -1630,6 +1645,15 @@ load 1ms }, { query: "metric_neg[50s:25s] @ 0", start: 100, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 26, T: -25000}, {F: 1, T: 0}}, + Metric: lblsneg, + }, + }, + }, { + query: "metric_neg[50s1ms:25s] @ 0", // Add 1ms to the range to see the legacy behavior of the previous test. + start: 100, result: promql.Matrix{ promql.Series{ Floats: []promql.FPoint{{F: 51, T: -50000}, {F: 26, T: -25000}, {F: 1, T: 0}}, @@ -1639,6 +1663,15 @@ load 1ms }, { query: "metric_neg[50s:25s] @ -100", start: 100, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 126, T: -125000}, {F: 101, T: -100000}}, + Metric: lblsneg, + }, + }, + }, { + query: "metric_neg[50s1ms:25s] @ -100", // Add 1ms to the range to see the legacy behavior of the previous test. 
+ start: 100, result: promql.Matrix{ promql.Series{ Floats: []promql.FPoint{{F: 151, T: -150000}, {F: 126, T: -125000}, {F: 101, T: -100000}}, @@ -1646,7 +1679,7 @@ load 1ms }, }, }, { - query: `metric_ms[100ms:25ms] @ 2.345`, + query: `metric_ms[101ms:25ms] @ 2.345`, start: 100, result: promql.Matrix{ promql.Series{ @@ -1831,7 +1864,7 @@ func TestSubquerySelector(t *testing.T) { nil, promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, + Floats: []promql.FPoint{{F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1878,6 +1911,20 @@ func TestSubquerySelector(t *testing.T) { cases: []caseType{ { // Normal selector. Query: `http_requests{group=~"pro.*",instance="0"}[30s:10s]`, + Result: promql.Result{ + nil, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 10000, T: 10000000}, {F: 100, T: 10010000}, {F: 130, T: 10020000}}, + Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), + }, + }, + nil, + }, + Start: time.Unix(10020, 0), + }, + { // Normal selector. Add 1ms to the range to see the legacy behavior of the previous test. + Query: `http_requests{group=~"pro.*",instance="0"}[30s1ms:10s]`, Result: promql.Result{ nil, promql.Matrix{ @@ -1920,6 +1967,36 @@ func TestSubquerySelector(t *testing.T) { }, { Query: `rate(http_requests[1m])[15s:5s]`, + Result: promql.Result{ + nil, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 3, T: 7990000}, {F: 3, T: 7995000}, {F: 3, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "canary"), + DropName: true, + }, + promql.Series{ + Floats: []promql.FPoint{{F: 4, T: 7990000}, {F: 4, T: 7995000}, {F: 4, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "canary"), + DropName: true, + }, + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 7990000}, {F: 1, T: 7995000}, {F: 1, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "production"), + DropName: true, + }, + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: 7990000}, {F: 2, T: 7995000}, {F: 2, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "production"), + DropName: true, + }, + }, + nil, + }, + Start: time.Unix(8000, 0), + }, + { + Query: `rate(http_requests[1m])[15s1ms:5s]`, // Add 1ms to the range to see the legacy behavior of the previous test. Result: promql.Result{ nil, promql.Matrix{ @@ -1950,6 +2027,35 @@ func TestSubquerySelector(t *testing.T) { }, { Query: `sum(http_requests{group=~"pro.*"})[30s:10s]`, + Result: promql.Result{ + nil, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 300, T: 100000}, {F: 330, T: 110000}, {F: 360, T: 120000}}, + Metric: labels.EmptyLabels(), + }, + }, + nil, + }, + Start: time.Unix(120, 0), + }, + { + Query: `sum(http_requests{group=~"pro.*"})[30s:10s]`, + Result: promql.Result{ + nil, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 300, T: 100000}, {F: 330, T: 110000}, {F: 360, T: 120000}}, + Metric: labels.EmptyLabels(), + }, + }, + nil, + }, + Start: time.Unix(121, 0), // 1s later doesn't change the result. + }, + { + // Add 1ms to the range to see the legacy behavior of the previous test. 
+ Query: `sum(http_requests{group=~"pro.*"})[30s1ms:10s]`, Result: promql.Result{ nil, promql.Matrix{ @@ -1964,6 +2070,20 @@ func TestSubquerySelector(t *testing.T) { }, { Query: `sum(http_requests)[40s:10s]`, + Result: promql.Result{ + nil, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 900, T: 90000}, {F: 1000, T: 100000}, {F: 1100, T: 110000}, {F: 1200, T: 120000}}, + Metric: labels.EmptyLabels(), + }, + }, + nil, + }, + Start: time.Unix(120, 0), + }, + { + Query: `sum(http_requests)[40s1ms:10s]`, // Add 1ms to the range to see the legacy behavior of the previous test. Result: promql.Result{ nil, promql.Matrix{ @@ -1978,6 +2098,21 @@ func TestSubquerySelector(t *testing.T) { }, { Query: `(sum(http_requests{group=~"p.*"})+sum(http_requests{group=~"c.*"}))[20s:5s]`, + Result: promql.Result{ + nil, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1000, T: 105000}, {F: 1100, T: 110000}, {F: 1100, T: 115000}, {F: 1200, T: 120000}}, + Metric: labels.EmptyLabels(), + }, + }, + nil, + }, + Start: time.Unix(120, 0), + }, + { + // Add 1ms to the range to see the legacy behavior of the previous test. + Query: `(sum(http_requests{group=~"p.*"})+sum(http_requests{group=~"c.*"}))[20s1ms:5s]`, Result: promql.Result{ nil, promql.Matrix{ @@ -2017,23 +2152,37 @@ func TestSubquerySelector(t *testing.T) { type FakeQueryLogger struct { closed bool logs []interface{} + attrs []any } func NewFakeQueryLogger() *FakeQueryLogger { return &FakeQueryLogger{ closed: false, logs: make([]interface{}, 0), + attrs: make([]any, 0), } } +// It implements the promql.QueryLogger interface. func (f *FakeQueryLogger) Close() error { f.closed = true return nil } -func (f *FakeQueryLogger) Log(l ...interface{}) error { - f.logs = append(f.logs, l...) - return nil +// It implements the promql.QueryLogger interface. +func (f *FakeQueryLogger) Log(ctx context.Context, level slog.Level, msg string, args ...any) { + // Test usage only really cares about existence of keyvals passed in + // via args, just append in the log message before handling the + // provided args and any embedded kvs added via `.With()` on f.attrs. + log := append([]any{msg}, args...) + log = append(log, f.attrs...) + f.attrs = f.attrs[:0] + f.logs = append(f.logs, log...) +} + +// It implements the promql.QueryLogger interface. +func (f *FakeQueryLogger) With(args ...any) { + f.attrs = append(f.attrs, args...) 
} func TestQueryLogger_basic(t *testing.T) { @@ -2061,9 +2210,8 @@ func TestQueryLogger_basic(t *testing.T) { f1 := NewFakeQueryLogger() engine.SetQueryLogger(f1) queryExec() - for i, field := range []interface{}{"params", map[string]interface{}{"query": "test statement"}} { - require.Equal(t, field, f1.logs[i]) - } + require.Contains(t, f1.logs, `params`) + require.Contains(t, f1.logs, map[string]interface{}{"query": "test statement"}) l := len(f1.logs) queryExec() @@ -2109,11 +2257,8 @@ func TestQueryLogger_fields(t *testing.T) { res := query.Exec(ctx) require.NoError(t, res.Err) - expected := []string{"foo", "bar"} - for i, field := range expected { - v := f1.logs[len(f1.logs)-len(expected)+i].(string) - require.Equal(t, field, v) - } + require.Contains(t, f1.logs, `foo`) + require.Contains(t, f1.logs, `bar`) } func TestQueryLogger_error(t *testing.T) { @@ -2139,9 +2284,10 @@ func TestQueryLogger_error(t *testing.T) { res := query.Exec(ctx) require.Error(t, res.Err, "query should have failed") - for i, field := range []interface{}{"params", map[string]interface{}{"query": "test statement"}, "error", testErr} { - require.Equal(t, f1.logs[i], field) - } + require.Contains(t, f1.logs, `params`) + require.Contains(t, f1.logs, map[string]interface{}{"query": "test statement"}) + require.Contains(t, f1.logs, `error`) + require.Contains(t, f1.logs, testErr) } func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { @@ -3060,7 +3206,7 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) { ts time.Time }{ "matches series with points in range": { - expr: "some_metric[1m]", + expr: "some_metric[2m]", ts: baseT.Add(2 * time.Minute), expected: promql.Matrix{ { @@ -3096,7 +3242,6 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) { { Metric: labels.FromStrings("__name__", "some_metric_with_stale_marker"), Floats: []promql.FPoint{ - {T: timestamp.FromTime(baseT), F: 0}, {T: timestamp.FromTime(baseT.Add(time.Minute)), F: 1}, {T: timestamp.FromTime(baseT.Add(3 * time.Minute)), F: 3}, }, @@ -3118,6 +3263,217 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) { } } +func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { + // TODO(codesome): Integrate histograms into the PromQL testing framework + // and write more tests there. 
+ cases := []struct { + histograms []histogram.Histogram + expected histogram.FloatHistogram + expectedAvg histogram.FloatHistogram + }{ + { + histograms: []histogram.Histogram{ + { + CounterResetHint: histogram.GaugeType, + Schema: 0, + Count: 25, + Sum: 1234.5, + ZeroThreshold: 0.001, + ZeroCount: 4, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 2, Length: 2}, + }, + NegativeBuckets: []int64{2, 2, -3, 8}, + }, + { + CounterResetHint: histogram.GaugeType, + Schema: 0, + Count: 41, + Sum: 2345.6, + ZeroThreshold: 0.001, + ZeroCount: 5, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 2, Length: 3}, + }, + NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, + }, + { + CounterResetHint: histogram.GaugeType, + Schema: 0, + Count: 41, + Sum: 1111.1, + ZeroThreshold: 0.001, + ZeroCount: 5, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 2, Length: 3}, + }, + NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, + }, + { + CounterResetHint: histogram.GaugeType, + Schema: 1, // Everything is 0 just to make the count 4 so avg has nicer numbers. + }, + }, + expected: histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Schema: 0, + ZeroThreshold: 0.001, + ZeroCount: 14, + Count: 107, + Sum: 4691.2, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 7}, + }, + PositiveBuckets: []float64{3, 8, 2, 5, 3, 2, 2}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 6}, + {Offset: 3, Length: 3}, + }, + NegativeBuckets: []float64{2, 6, 8, 4, 15, 9, 10, 10, 4}, + }, + expectedAvg: histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Schema: 0, + ZeroThreshold: 0.001, + ZeroCount: 3.5, + Count: 26.75, + Sum: 1172.8, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 7}, + }, + PositiveBuckets: []float64{0.75, 2, 0.5, 1.25, 0.75, 0.5, 0.5}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 6}, + {Offset: 3, Length: 3}, + }, + NegativeBuckets: []float64{0.5, 1.5, 2, 1, 3.75, 2.25, 2.5, 2.5, 1}, + }, + }, + } + + idx0 := int64(0) + for _, c := range cases { + for _, floatHisto := range []bool{true, false} { + t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) { + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) + + seriesName := "sparse_histogram_series" + seriesNameOverTime := "sparse_histogram_series_over_time" + + engine := newTestEngine(t) + + ts := idx0 * int64(10*time.Minute/time.Millisecond) + app := storage.Appender(context.Background()) + _, err := app.Append(0, labels.FromStrings("__name__", "float_series", "idx", "0"), ts, 42) + require.NoError(t, err) + for idx1, h := range c.histograms { + lbls := labels.FromStrings("__name__", seriesName, "idx", strconv.Itoa(idx1)) + // Since we mutate h later, we need to create a copy here. 
+ var err error + if floatHisto { + _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil)) + } else { + _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) + } + require.NoError(t, err) + + lbls = labels.FromStrings("__name__", seriesNameOverTime) + newTs := ts + int64(idx1)*int64(time.Minute/time.Millisecond) + // Since we mutate h later, we need to create a copy here. + if floatHisto { + _, err = app.AppendHistogram(0, lbls, newTs, nil, h.Copy().ToFloat(nil)) + } else { + _, err = app.AppendHistogram(0, lbls, newTs, h.Copy(), nil) + } + require.NoError(t, err) + } + require.NoError(t, app.Commit()) + + queryAndCheck := func(queryString string, ts int64, exp promql.Vector) { + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) + require.NoError(t, err) + + res := qry.Exec(context.Background()) + require.NoError(t, res.Err) + require.Empty(t, res.Warnings) + + vector, err := res.Vector() + require.NoError(t, err) + + testutil.RequireEqual(t, exp, vector) + } + queryAndCheckAnnotations := func(queryString string, ts int64, expWarnings annotations.Annotations) { + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) + require.NoError(t, err) + + res := qry.Exec(context.Background()) + require.NoError(t, res.Err) + require.Equal(t, expWarnings, res.Warnings) + } + + // sum(). + queryString := fmt.Sprintf("sum(%s)", seriesName) + queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) + + queryString = `sum({idx="0"})` + var annos annotations.Annotations + annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(posrange.PositionRange{Start: 4, End: 13})) + queryAndCheckAnnotations(queryString, ts, annos) + + // + operator. + queryString = fmt.Sprintf(`%s{idx="0"}`, seriesName) + for idx := 1; idx < len(c.histograms); idx++ { + queryString += fmt.Sprintf(` + ignoring(idx) %s{idx="%d"}`, seriesName, idx) + } + queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) + + // count(). + queryString = fmt.Sprintf("count(%s)", seriesName) + queryAndCheck(queryString, ts, []promql.Sample{{T: ts, F: 4, Metric: labels.EmptyLabels()}}) + + // avg(). + queryString = fmt.Sprintf("avg(%s)", seriesName) + queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expectedAvg, Metric: labels.EmptyLabels()}}) + + offset := int64(len(c.histograms) - 1) + newTs := ts + offset*int64(time.Minute/time.Millisecond) + + // sum_over_time(). + queryString = fmt.Sprintf("sum_over_time(%s[%dm:1m])", seriesNameOverTime, offset+1) + queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expected, Metric: labels.EmptyLabels(), DropName: true}}) + + // avg_over_time(). + queryString = fmt.Sprintf("avg_over_time(%s[%dm:1m])", seriesNameOverTime, offset+1) + queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expectedAvg, Metric: labels.EmptyLabels(), DropName: true}}) + }) + idx0++ + } + } +} + func TestNativeHistogram_SubOperator(t *testing.T) { // TODO(codesome): Integrate histograms into the PromQL testing framework // and write more tests there. 
@@ -3370,43 +3726,43 @@ metric 0 1 2 }{ { name: "default lookback delta", - ts: lastDatapointTs.Add(defaultLookbackDelta), + ts: lastDatapointTs.Add(defaultLookbackDelta - time.Millisecond), expectSamples: true, }, { name: "outside default lookback delta", - ts: lastDatapointTs.Add(defaultLookbackDelta + time.Millisecond), + ts: lastDatapointTs.Add(defaultLookbackDelta), expectSamples: false, }, { name: "custom engine lookback delta", - ts: lastDatapointTs.Add(10 * time.Minute), + ts: lastDatapointTs.Add(10*time.Minute - time.Millisecond), engineLookback: 10 * time.Minute, expectSamples: true, }, { name: "outside custom engine lookback delta", - ts: lastDatapointTs.Add(10*time.Minute + time.Millisecond), + ts: lastDatapointTs.Add(10 * time.Minute), engineLookback: 10 * time.Minute, expectSamples: false, }, { name: "custom query lookback delta", - ts: lastDatapointTs.Add(20 * time.Minute), + ts: lastDatapointTs.Add(20*time.Minute - time.Millisecond), engineLookback: 10 * time.Minute, queryLookback: 20 * time.Minute, expectSamples: true, }, { name: "outside custom query lookback delta", - ts: lastDatapointTs.Add(20*time.Minute + time.Millisecond), + ts: lastDatapointTs.Add(20 * time.Minute), engineLookback: 10 * time.Minute, queryLookback: 20 * time.Minute, expectSamples: false, }, { name: "negative custom query lookback delta", - ts: lastDatapointTs.Add(20 * time.Minute), + ts: lastDatapointTs.Add(20*time.Minute - time.Millisecond), engineLookback: -10 * time.Minute, queryLookback: 20 * time.Minute, expectSamples: true, @@ -3473,18 +3829,18 @@ histogram {{sum:4 count:4 buckets:[2 2]}} {{sum:6 count:6 buckets:[3 3]}} {{sum: } } - qry, err := engine.NewRangeQuery(context.Background(), storage, nil, "increase(histogram[60s])", time.Unix(0, 0), time.Unix(0, 0).Add(1*time.Minute), time.Minute) + qry, err := engine.NewRangeQuery(context.Background(), storage, nil, "increase(histogram[90s])", time.Unix(0, 0), time.Unix(0, 0).Add(60*time.Second), time.Minute) require.NoError(t, err) verify(t, qry, []histogram.FloatHistogram{ { - Count: 2, - Sum: 2, // Increase from 4 to 6 is 2. + Count: 3, + Sum: 3, // Increase from 4 to 6 is 2. Interpolation adds 1. PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, // Two buckets changed between the first and second histogram. - PositiveBuckets: []float64{1, 1}, // Increase from 2 to 3 is 1 in both buckets. + PositiveBuckets: []float64{1.5, 1.5}, // Increase from 2 to 3 is 1 in both buckets. Interpolation adds 0.5. 
}, }) - qry, err = engine.NewInstantQuery(context.Background(), storage, nil, "histogram[60s]", time.Unix(0, 0).Add(2*time.Minute)) + qry, err = engine.NewInstantQuery(context.Background(), storage, nil, "histogram[61s]", time.Unix(0, 0).Add(2*time.Minute)) require.NoError(t, err) verify(t, qry, []histogram.FloatHistogram{ { @@ -3502,64 +3858,186 @@ histogram {{sum:4 count:4 buckets:[2 2]}} {{sum:6 count:6 buckets:[3 3]}} {{sum: }) } -func TestEvaluationWithDelayedNameRemovalDisabled(t *testing.T) { - opts := promql.EngineOpts{ - Logger: nil, - Reg: nil, - EnableAtModifier: true, - MaxSamples: 10000, - Timeout: 10 * time.Second, - EnableDelayedNameRemoval: false, +func TestRateAnnotations(t *testing.T) { + testCases := map[string]struct { + data string + expr string + expectedWarningAnnotations []string + expectedInfoAnnotations []string + }{ + "info annotation when two samples are selected": { + data: ` + series 1 2 + `, + expr: "rate(series[1m1s])", + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{ + `PromQL info: metric might not be a counter, name does not end in _total/_sum/_count/_bucket: "series" (1:6)`, + }, + }, + "no info annotations when no samples": { + data: ` + series + `, + expr: "rate(series[1m1s])", + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{}, + }, + "no info annotations when selecting one sample": { + data: ` + series 1 2 + `, + expr: "rate(series[10s])", + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{}, + }, + "no info annotations when no samples due to mixed data types": { + data: ` + series{label="a"} 1 {{schema:1 sum:15 count:10 buckets:[1 2 3]}} + `, + expr: "rate(series[1m1s])", + expectedWarningAnnotations: []string{ + `PromQL warning: encountered a mix of histograms and floats for metric name "series" (1:6)`, + }, + expectedInfoAnnotations: []string{}, + }, + "no info annotations when selecting two native histograms": { + data: ` + series{label="a"} {{schema:1 sum:10 count:5 buckets:[1 2 3]}} {{schema:1 sum:15 count:10 buckets:[1 2 3]}} + `, + expr: "rate(series[1m1s])", + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{}, + }, } - engine := promqltest.NewTestEngineWithOpts(t, opts) + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + store := promqltest.LoadedStorage(t, "load 1m\n"+strings.TrimSpace(testCase.data)) + t.Cleanup(func() { _ = store.Close() }) + + engine := newTestEngine(t) + query, err := engine.NewInstantQuery(context.Background(), store, nil, testCase.expr, timestamp.Time(0).Add(1*time.Minute)) + require.NoError(t, err) + t.Cleanup(query.Close) + + res := query.Exec(context.Background()) + require.NoError(t, res.Err) - promqltest.RunTest(t, ` -load 5m - metric{env="1"} 0 60 120 - another_metric{env="1"} 60 120 180 + warnings, infos := res.Warnings.AsStrings(testCase.expr, 0, 0) + testutil.RequireEqual(t, testCase.expectedWarningAnnotations, warnings) + testutil.RequireEqual(t, testCase.expectedInfoAnnotations, infos) + }) + } +} -# Does not drop __name__ for vector selector -eval instant at 15m metric{env="1"} - metric{env="1"} 120 +func TestHistogramRateWithFloatStaleness(t *testing.T) { + // Make a chunk with two normal histograms of the same value. 
+ h1 := histogram.Histogram{ + Schema: 2, + Count: 10, + Sum: 100, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{100}, + } -# Drops __name__ for unary operators -eval instant at 15m -metric - {env="1"} -120 + c1 := chunkenc.NewHistogramChunk() + app, err := c1.Appender() + require.NoError(t, err) + var ( + newc chunkenc.Chunk + recoded bool + ) -# Drops __name__ for binary operators -eval instant at 15m metric + another_metric - {env="1"} 300 + newc, recoded, app, err = app.AppendHistogram(nil, 0, h1.Copy(), false) + require.NoError(t, err) + require.False(t, recoded) + require.Nil(t, newc) -# Does not drop __name__ for binary comparison operators -eval instant at 15m metric <= another_metric - metric{env="1"} 120 + newc, recoded, _, err = app.AppendHistogram(nil, 10, h1.Copy(), false) + require.NoError(t, err) + require.False(t, recoded) + require.Nil(t, newc) -# Drops __name__ for binary comparison operators with "bool" modifier -eval instant at 15m metric <= bool another_metric - {env="1"} 1 + // Make a chunk with a single float stale marker. + c2 := chunkenc.NewXORChunk() + app, err = c2.Appender() + require.NoError(t, err) -# Drops __name__ for vector-scalar operations -eval instant at 15m metric * 2 - {env="1"} 240 + app.Append(20, math.Float64frombits(value.StaleNaN)) -# Drops __name__ for instant-vector functions -eval instant at 15m clamp(metric, 0, 100) - {env="1"} 100 + // Make a chunk with two normal histograms that have zero value. + h2 := histogram.Histogram{ + Schema: 2, + } -# Drops __name__ for round function -eval instant at 15m round(metric) - {env="1"} 120 + c3 := chunkenc.NewHistogramChunk() + app, err = c3.Appender() + require.NoError(t, err) -# Drops __name__ for range-vector functions -eval instant at 15m rate(metric{env="1"}[10m]) - {env="1"} 0.2 + newc, recoded, app, err = app.AppendHistogram(nil, 30, h2.Copy(), false) + require.NoError(t, err) + require.False(t, recoded) + require.Nil(t, newc) + + newc, recoded, _, err = app.AppendHistogram(nil, 40, h2.Copy(), false) + require.NoError(t, err) + require.False(t, recoded) + require.Nil(t, newc) + + querier := storage.MockQuerier{ + SelectMockFunction: func(_ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet { + return &singleSeriesSet{ + series: mockSeries{chunks: []chunkenc.Chunk{c1, c2, c3}, labelSet: []string{"__name__", "foo"}}, + } + }, + } -# Does not drop __name__ for last_over_time function -eval instant at 15m last_over_time(metric{env="1"}[10m]) - metric{env="1"} 120 + queriable := storage.MockQueryable{MockQuerier: &querier} -# Drops name for other _over_time functions -eval instant at 15m max_over_time(metric{env="1"}[10m]) - {env="1"} 120 -`, engine) + engine := promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery) + + q, err := engine.NewInstantQuery(context.Background(), &queriable, nil, "rate(foo[40s])", timestamp.Time(45)) + require.NoError(t, err) + defer q.Close() + + res := q.Exec(context.Background()) + require.NoError(t, res.Err) + + vec, err := res.Vector() + require.NoError(t, err) + + // Single sample result. + require.Len(t, vec, 1) + // The result is a histogram. + require.NotNil(t, vec[0].H) + // The result should be zero as the histogram has not increased, so the rate is zero. 
+ require.Equal(t, 0.0, vec[0].H.Count) + require.Equal(t, 0.0, vec[0].H.Sum) +} + +type singleSeriesSet struct { + series storage.Series + consumed bool +} + +func (s *singleSeriesSet) Next() bool { c := s.consumed; s.consumed = true; return !c } +func (s singleSeriesSet) At() storage.Series { return s.series } +func (s singleSeriesSet) Err() error { return nil } +func (s singleSeriesSet) Warnings() annotations.Annotations { return nil } + +type mockSeries struct { + chunks []chunkenc.Chunk + labelSet []string +} + +func (s mockSeries) Labels() labels.Labels { + return labels.FromStrings(s.labelSet...) +} + +func (s mockSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator { + iterables := []chunkenc.Iterator{} + for _, c := range s.chunks { + iterables = append(iterables, c.Iterator(nil)) + } + return storage.ChainSampleIteratorFromIterators(it, iterables) } diff --git a/promql/functions.go b/promql/functions.go index a509f783faf..f9af4fbe092 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -350,7 +350,7 @@ func calcTrendValue(i int, tf, s0, s1, b float64) float64 { // data. A lower smoothing factor increases the influence of historical data. The trend factor (0 < tf < 1) affects // how trends in historical data will affect the current data. A higher trend factor increases the influence. // of trends. Algorithm taken from https://en.wikipedia.org/wiki/Exponential_smoothing titled: "Double exponential smoothing". -func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] // The smoothing factor argument. @@ -533,6 +533,10 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper toNearestInverse := 1.0 / toNearest for _, el := range vec { + if el.H != nil { + // Process only float samples. + continue + } f := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse if !enh.enableDelayedNameRemoval { el.Metric = el.Metric.DropMetricName() @@ -1465,7 +1469,7 @@ func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressio regexStr = stringFromArg(args[4]) ) - regex, err := regexp.Compile("^(?:" + regexStr + ")$") + regex, err := regexp.Compile("^(?s:" + regexStr + ")$") if err != nil { panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr)) } @@ -1499,11 +1503,6 @@ func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressio return matrix, ws } -// === label_replace(Vector parser.ValueTypeVector, dst_label, replacement, src_labelname, regex parser.ValueTypeString) (Vector, Annotations) === -func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - panic("funcLabelReplace wrong implementation called") -} - // === Vector(s Scalar) (Vector, Annotations) === func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, @@ -1555,11 +1554,6 @@ func (ev *evaluator) evalLabelJoin(ctx context.Context, args parser.Expressions) return matrix, ws } -// === label_join(vector model.ValVector, dest_labelname, separator, src_labelname...) 
(Vector, Annotations) === -func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - panic("funcLabelReplace wrong implementation called") -} - // Common code for date related functions. func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) float64) Vector { if len(vals) == 0 { @@ -1642,83 +1636,83 @@ func funcYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) // FunctionCalls is a list of all functions supported by PromQL, including their types. var FunctionCalls = map[string]FunctionCall{ - "abs": funcAbs, - "absent": funcAbsent, - "absent_over_time": funcAbsentOverTime, - "acos": funcAcos, - "acosh": funcAcosh, - "asin": funcAsin, - "asinh": funcAsinh, - "atan": funcAtan, - "atanh": funcAtanh, - "avg_over_time": funcAvgOverTime, - "ceil": funcCeil, - "changes": funcChanges, - "clamp": funcClamp, - "clamp_max": funcClampMax, - "clamp_min": funcClampMin, - "cos": funcCos, - "cosh": funcCosh, - "count_over_time": funcCountOverTime, - "days_in_month": funcDaysInMonth, - "day_of_month": funcDayOfMonth, - "day_of_week": funcDayOfWeek, - "day_of_year": funcDayOfYear, - "deg": funcDeg, - "delta": funcDelta, - "deriv": funcDeriv, - "exp": funcExp, - "floor": funcFloor, - "histogram_avg": funcHistogramAvg, - "histogram_count": funcHistogramCount, - "histogram_fraction": funcHistogramFraction, - "histogram_quantile": funcHistogramQuantile, - "histogram_sum": funcHistogramSum, - "histogram_stddev": funcHistogramStdDev, - "histogram_stdvar": funcHistogramStdVar, - "holt_winters": funcHoltWinters, - "hour": funcHour, - "idelta": funcIdelta, - "increase": funcIncrease, - "info": nil, - "irate": funcIrate, - "label_replace": funcLabelReplace, - "label_join": funcLabelJoin, - "ln": funcLn, - "log10": funcLog10, - "log2": funcLog2, - "last_over_time": funcLastOverTime, - "mad_over_time": funcMadOverTime, - "max_over_time": funcMaxOverTime, - "min_over_time": funcMinOverTime, - "minute": funcMinute, - "month": funcMonth, - "pi": funcPi, - "predict_linear": funcPredictLinear, - "present_over_time": funcPresentOverTime, - "quantile_over_time": funcQuantileOverTime, - "rad": funcRad, - "rate": funcRate, - "resets": funcResets, - "round": funcRound, - "scalar": funcScalar, - "sgn": funcSgn, - "sin": funcSin, - "sinh": funcSinh, - "sort": funcSort, - "sort_desc": funcSortDesc, - "sort_by_label": funcSortByLabel, - "sort_by_label_desc": funcSortByLabelDesc, - "sqrt": funcSqrt, - "stddev_over_time": funcStddevOverTime, - "stdvar_over_time": funcStdvarOverTime, - "sum_over_time": funcSumOverTime, - "tan": funcTan, - "tanh": funcTanh, - "time": funcTime, - "timestamp": funcTimestamp, - "vector": funcVector, - "year": funcYear, + "abs": funcAbs, + "absent": funcAbsent, + "absent_over_time": funcAbsentOverTime, + "acos": funcAcos, + "acosh": funcAcosh, + "asin": funcAsin, + "asinh": funcAsinh, + "atan": funcAtan, + "atanh": funcAtanh, + "avg_over_time": funcAvgOverTime, + "ceil": funcCeil, + "changes": funcChanges, + "clamp": funcClamp, + "clamp_max": funcClampMax, + "clamp_min": funcClampMin, + "cos": funcCos, + "cosh": funcCosh, + "count_over_time": funcCountOverTime, + "days_in_month": funcDaysInMonth, + "day_of_month": funcDayOfMonth, + "day_of_week": funcDayOfWeek, + "day_of_year": funcDayOfYear, + "deg": funcDeg, + "delta": funcDelta, + "deriv": funcDeriv, + "exp": funcExp, + "floor": funcFloor, + "histogram_avg": funcHistogramAvg, + "histogram_count": funcHistogramCount, + "histogram_fraction": 
funcHistogramFraction, + "histogram_quantile": funcHistogramQuantile, + "histogram_sum": funcHistogramSum, + "histogram_stddev": funcHistogramStdDev, + "histogram_stdvar": funcHistogramStdVar, + "double_exponential_smoothing": funcDoubleExponentialSmoothing, + "hour": funcHour, + "idelta": funcIdelta, + "increase": funcIncrease, + "info": nil, + "irate": funcIrate, + "label_replace": nil, // evalLabelReplace not called via this map. + "label_join": nil, // evalLabelJoin not called via this map. + "ln": funcLn, + "log10": funcLog10, + "log2": funcLog2, + "last_over_time": funcLastOverTime, + "mad_over_time": funcMadOverTime, + "max_over_time": funcMaxOverTime, + "min_over_time": funcMinOverTime, + "minute": funcMinute, + "month": funcMonth, + "pi": funcPi, + "predict_linear": funcPredictLinear, + "present_over_time": funcPresentOverTime, + "quantile_over_time": funcQuantileOverTime, + "rad": funcRad, + "rate": funcRate, + "resets": funcResets, + "round": funcRound, + "scalar": funcScalar, + "sgn": funcSgn, + "sin": funcSin, + "sinh": funcSinh, + "sort": funcSort, + "sort_desc": funcSortDesc, + "sort_by_label": funcSortByLabel, + "sort_by_label_desc": funcSortByLabelDesc, + "sqrt": funcSqrt, + "stddev_over_time": funcStddevOverTime, + "stdvar_over_time": funcStdvarOverTime, + "sum_over_time": funcSumOverTime, + "tan": funcTan, + "tanh": funcTanh, + "time": funcTime, + "timestamp": funcTimestamp, + "vector": funcVector, + "year": funcYear, } // AtModifierUnsafeFunctions are the functions whose result diff --git a/promql/fuzz.go b/promql/fuzz.go index 5f08e6a72c9..759055fb0d9 100644 --- a/promql/fuzz.go +++ b/promql/fuzz.go @@ -61,17 +61,13 @@ const ( var symbolTable = labels.NewSymbolTable() func fuzzParseMetricWithContentType(in []byte, contentType string) int { - p, warning := textparse.New(in, contentType, false, symbolTable) - if warning != nil { + p, warning := textparse.New(in, contentType, "", false, false, symbolTable) + if p == nil || warning != nil { // An invalid content type is being passed, which should not happen // in this context. panic(warning) } - if contentType == "application/openmetrics-text" { - p = textparse.NewOpenMetricsParser(in, symbolTable) - } - var err error for { _, err = p.Next() @@ -95,7 +91,7 @@ func fuzzParseMetricWithContentType(in []byte, contentType string) int { // Note that this is not the parser for the text-based exposition-format; that // lives in github.com/prometheus/client_golang/text. 
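The holt_winters entry above is now registered as double_exponential_smoothing; only the PromQL-facing name changes, and the computation is still Holt's linear method driven by a smoothing factor sf and a trend factor tf, as the calcTrendValue comment earlier in this file describes. A minimal sketch of that recurrence, illustrative only and not the promql implementation (function and variable names here are mine):

package main

import (
	"fmt"
	"math"
)

// doubleExponentialSmoothing returns the smoothed value of the last sample in x,
// given a smoothing factor sf and a trend factor tf (both in (0, 1)), using the
// standard Holt linear recurrence:
//
//	s[i] = sf*x[i] + (1-sf)*(s[i-1] + b[i-1])
//	b[i] = tf*(s[i] - s[i-1]) + (1-tf)*b[i-1]
func doubleExponentialSmoothing(x []float64, sf, tf float64) float64 {
	if len(x) < 2 {
		return math.NaN() // estimating a trend needs at least two samples
	}
	s, b := x[0], x[1]-x[0] // seed the level with the first sample and the trend with the first delta
	for i := 1; i < len(x); i++ {
		prev := s
		s = sf*x[i] + (1-sf)*(s+b)
		b = tf*(s-prev) + (1-tf)*b
	}
	return s
}

func main() {
	fmt.Println(doubleExponentialSmoothing([]float64{1, 2, 3, 4, 5}, 0.5, 0.5))
}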
func FuzzParseMetric(in []byte) int { - return fuzzParseMetricWithContentType(in, "") + return fuzzParseMetricWithContentType(in, "text/plain") } func FuzzParseOpenMetric(in []byte) int { diff --git a/promql/fuzz_test.go b/promql/fuzz_test.go index 1f0bbaa662e..4a26798ded1 100644 --- a/promql/fuzz_test.go +++ b/promql/fuzz_test.go @@ -29,7 +29,7 @@ func TestfuzzParseMetricWithContentTypePanicOnInvalid(t *testing.T) { } else { err, ok := p.(error) require.True(t, ok) - require.Contains(t, err.Error(), "duplicate parameter name") + require.ErrorContains(t, err, "duplicate parameter name") } }() diff --git a/promql/parser/functions.go b/promql/parser/functions.go index 434d3cdc1c7..aa65aca2755 100644 --- a/promql/parser/functions.go +++ b/promql/parser/functions.go @@ -202,10 +202,11 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeScalar, ValueTypeVector}, ReturnType: ValueTypeVector, }, - "holt_winters": { - Name: "holt_winters", - ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar}, - ReturnType: ValueTypeVector, + "double_exponential_smoothing": { + Name: "double_exponential_smoothing", + ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar}, + ReturnType: ValueTypeVector, + Experimental: true, }, "hour": { Name: "hour", diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y index befb9bdf3e6..c321a1e9735 100644 --- a/promql/parser/generated_parser.y +++ b/promql/parser/generated_parser.y @@ -667,10 +667,16 @@ label_set_list : label_set_list COMMA label_set_item label_set_item : IDENTIFIER EQL STRING { $$ = labels.Label{Name: $1.Val, Value: yylex.(*parser).unquoteString($3.Val) } } + | string_identifier EQL STRING + { $$ = labels.Label{Name: $1.Val, Value: yylex.(*parser).unquoteString($3.Val) } } | IDENTIFIER EQL error { yylex.(*parser).unexpected("label set", "string"); $$ = labels.Label{}} + | string_identifier EQL error + { yylex.(*parser).unexpected("label set", "string"); $$ = labels.Label{}} | IDENTIFIER error { yylex.(*parser).unexpected("label set", "\"=\""); $$ = labels.Label{}} + | string_identifier error + { yylex.(*parser).unexpected("label set", "\"=\""); $$ = labels.Label{}} | error { yylex.(*parser).unexpected("label set", "identifier or \"}\""); $$ = labels.Label{} } ; diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go index ad58a52976e..8979410ceb4 100644 --- a/promql/parser/generated_parser.y.go +++ b/promql/parser/generated_parser.y.go @@ -251,293 +251,295 @@ var yyExca = [...]int16{ 1, -1, -2, 0, -1, 37, - 1, 138, - 10, 138, - 24, 138, + 1, 141, + 10, 141, + 24, 141, -2, 0, -1, 61, - 2, 181, - 15, 181, - 79, 181, - 85, 181, - -2, 102, - -1, 62, - 2, 182, - 15, 182, - 79, 182, - 85, 182, - -2, 103, - -1, 63, - 2, 183, - 15, 183, - 79, 183, - 85, 183, - -2, 105, - -1, 64, 2, 184, 15, 184, 79, 184, 85, 184, - -2, 106, - -1, 65, + -2, 102, + -1, 62, 2, 185, 15, 185, 79, 185, 85, 185, - -2, 107, - -1, 66, + -2, 103, + -1, 63, 2, 186, 15, 186, 79, 186, 85, 186, - -2, 112, - -1, 67, + -2, 105, + -1, 64, 2, 187, 15, 187, 79, 187, 85, 187, - -2, 114, - -1, 68, + -2, 106, + -1, 65, 2, 188, 15, 188, 79, 188, 85, 188, - -2, 116, - -1, 69, + -2, 107, + -1, 66, 2, 189, 15, 189, 79, 189, 85, 189, - -2, 117, - -1, 70, + -2, 112, + -1, 67, 2, 190, 15, 190, 79, 190, 85, 190, - -2, 118, - -1, 71, + -2, 114, + -1, 68, 2, 191, 15, 191, 79, 191, 85, 191, - -2, 119, - -1, 72, + -2, 116, + -1, 69, 2, 192, 15, 192, 79, 192, 85, 192, - -2, 120, - -1, 73, + -2, 117, + -1, 
70, 2, 193, 15, 193, 79, 193, 85, 193, - -2, 124, - -1, 74, + -2, 118, + -1, 71, 2, 194, 15, 194, 79, 194, 85, 194, + -2, 119, + -1, 72, + 2, 195, + 15, 195, + 79, 195, + 85, 195, + -2, 120, + -1, 73, + 2, 196, + 15, 196, + 79, 196, + 85, 196, + -2, 124, + -1, 74, + 2, 197, + 15, 197, + 79, 197, + 85, 197, -2, 125, - -1, 200, - 9, 243, - 12, 243, - 13, 243, - 18, 243, - 19, 243, - 25, 243, - 41, 243, - 47, 243, - 48, 243, - 51, 243, - 57, 243, - 62, 243, - 63, 243, - 64, 243, - 65, 243, - 66, 243, - 67, 243, - 68, 243, - 69, 243, - 70, 243, - 71, 243, - 72, 243, - 73, 243, - 74, 243, - 75, 243, - 79, 243, - 83, 243, - 85, 243, - 88, 243, - 89, 243, + -1, 205, + 9, 246, + 12, 246, + 13, 246, + 18, 246, + 19, 246, + 25, 246, + 41, 246, + 47, 246, + 48, 246, + 51, 246, + 57, 246, + 62, 246, + 63, 246, + 64, 246, + 65, 246, + 66, 246, + 67, 246, + 68, 246, + 69, 246, + 70, 246, + 71, 246, + 72, 246, + 73, 246, + 74, 246, + 75, 246, + 79, 246, + 83, 246, + 85, 246, + 88, 246, + 89, 246, -2, 0, - -1, 201, - 9, 243, - 12, 243, - 13, 243, - 18, 243, - 19, 243, - 25, 243, - 41, 243, - 47, 243, - 48, 243, - 51, 243, - 57, 243, - 62, 243, - 63, 243, - 64, 243, - 65, 243, - 66, 243, - 67, 243, - 68, 243, - 69, 243, - 70, 243, - 71, 243, - 72, 243, - 73, 243, - 74, 243, - 75, 243, - 79, 243, - 83, 243, - 85, 243, - 88, 243, - 89, 243, + -1, 206, + 9, 246, + 12, 246, + 13, 246, + 18, 246, + 19, 246, + 25, 246, + 41, 246, + 47, 246, + 48, 246, + 51, 246, + 57, 246, + 62, 246, + 63, 246, + 64, 246, + 65, 246, + 66, 246, + 67, 246, + 68, 246, + 69, 246, + 70, 246, + 71, 246, + 72, 246, + 73, 246, + 74, 246, + 75, 246, + 79, 246, + 83, 246, + 85, 246, + 88, 246, + 89, 246, -2, 0, } const yyPrivate = 57344 -const yyLast = 799 +const yyLast = 804 var yyAct = [...]int16{ - 152, 334, 332, 155, 339, 226, 39, 192, 276, 44, - 291, 290, 118, 82, 178, 229, 107, 106, 346, 347, - 348, 349, 109, 108, 198, 239, 199, 156, 110, 105, - 6, 245, 200, 201, 133, 325, 111, 329, 228, 60, - 357, 293, 328, 304, 267, 160, 266, 128, 55, 151, - 302, 311, 302, 196, 340, 159, 55, 89, 54, 356, - 241, 242, 355, 113, 243, 114, 54, 98, 99, 265, - 112, 101, 256, 104, 88, 230, 232, 234, 235, 236, - 244, 246, 249, 250, 251, 252, 253, 257, 258, 105, - 333, 231, 233, 237, 238, 240, 247, 248, 103, 115, - 109, 254, 255, 324, 150, 218, 110, 264, 111, 270, - 77, 35, 7, 149, 188, 163, 322, 321, 173, 320, - 167, 170, 323, 165, 271, 166, 2, 3, 4, 5, - 263, 101, 194, 104, 180, 184, 197, 187, 186, 319, - 272, 202, 203, 204, 205, 206, 207, 208, 209, 210, - 211, 212, 213, 214, 215, 216, 195, 299, 103, 318, - 217, 36, 298, 1, 190, 219, 220, 317, 160, 160, - 316, 193, 160, 154, 182, 196, 229, 297, 159, 159, - 160, 358, 159, 268, 181, 183, 239, 260, 296, 262, - 159, 315, 245, 129, 314, 55, 225, 313, 161, 228, - 161, 161, 259, 312, 161, 54, 86, 295, 310, 288, - 289, 8, 161, 292, 162, 37, 162, 162, 49, 269, - 162, 241, 242, 309, 179, 243, 180, 127, 162, 126, - 308, 223, 294, 256, 48, 222, 230, 232, 234, 235, - 236, 244, 246, 249, 250, 251, 252, 253, 257, 258, - 221, 169, 231, 233, 237, 238, 240, 247, 248, 157, - 158, 164, 254, 255, 168, 10, 182, 300, 55, 301, - 303, 47, 305, 46, 132, 79, 181, 183, 54, 306, - 307, 45, 134, 135, 136, 137, 138, 139, 140, 141, - 142, 143, 144, 145, 146, 147, 148, 43, 59, 50, - 84, 9, 9, 121, 326, 78, 327, 130, 171, 121, - 83, 42, 131, 119, 335, 336, 337, 331, 185, 119, - 338, 261, 342, 341, 344, 343, 122, 117, 41, 177, - 350, 351, 122, 55, 176, 352, 53, 77, 40, 56, - 125, 354, 22, 54, 84, 124, 172, 175, 51, 57, - 191, 353, 273, 
85, 83, 189, 359, 224, 123, 80, - 345, 120, 81, 153, 58, 75, 227, 52, 116, 0, - 0, 18, 19, 0, 0, 20, 0, 0, 0, 0, - 0, 76, 0, 0, 0, 0, 61, 62, 63, 64, - 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, - 0, 0, 0, 13, 0, 0, 0, 24, 0, 30, - 0, 0, 31, 32, 55, 38, 105, 53, 77, 0, - 56, 275, 0, 22, 54, 0, 0, 0, 274, 0, - 57, 0, 278, 279, 277, 284, 286, 283, 285, 280, - 281, 282, 287, 87, 89, 0, 75, 0, 0, 0, - 0, 0, 18, 19, 98, 99, 20, 0, 101, 102, - 104, 88, 76, 0, 0, 0, 0, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, - 74, 0, 0, 0, 13, 103, 0, 0, 24, 0, - 30, 0, 55, 31, 32, 53, 77, 0, 56, 330, - 0, 22, 54, 0, 0, 0, 0, 0, 57, 0, - 278, 279, 277, 284, 286, 283, 285, 280, 281, 282, - 287, 0, 0, 0, 75, 0, 0, 0, 0, 0, - 18, 19, 0, 0, 20, 0, 0, 0, 17, 77, - 76, 0, 0, 0, 22, 61, 62, 63, 64, 65, - 66, 67, 68, 69, 70, 71, 72, 73, 74, 0, - 0, 0, 13, 0, 0, 0, 24, 0, 30, 0, - 0, 31, 32, 18, 19, 0, 0, 20, 0, 0, - 0, 17, 35, 0, 0, 0, 0, 22, 11, 12, - 14, 15, 16, 21, 23, 25, 26, 27, 28, 29, - 33, 34, 0, 0, 0, 13, 0, 0, 0, 24, - 0, 30, 0, 0, 31, 32, 18, 19, 0, 0, - 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 11, 12, 14, 15, 16, 21, 23, 25, 26, - 27, 28, 29, 33, 34, 105, 0, 0, 13, 0, - 0, 0, 24, 174, 30, 0, 0, 31, 32, 0, + 155, 339, 337, 158, 344, 231, 39, 197, 281, 44, + 296, 295, 84, 120, 82, 181, 109, 108, 351, 352, + 353, 354, 107, 111, 203, 136, 204, 159, 154, 112, + 205, 206, 234, 6, 271, 55, 163, 163, 107, 334, + 333, 307, 244, 275, 309, 54, 162, 162, 250, 363, + 91, 272, 330, 131, 362, 233, 60, 270, 276, 110, + 100, 101, 298, 115, 103, 116, 106, 90, 164, 164, + 114, 265, 113, 361, 277, 307, 360, 246, 247, 338, + 103, 248, 106, 153, 165, 165, 264, 316, 201, 261, + 122, 105, 235, 237, 239, 240, 241, 249, 251, 254, + 255, 256, 257, 258, 262, 263, 273, 105, 236, 238, + 242, 243, 245, 252, 253, 152, 117, 166, 259, 260, + 176, 164, 170, 173, 163, 168, 223, 169, 172, 2, + 3, 4, 5, 107, 162, 199, 111, 165, 187, 202, + 189, 171, 112, 269, 207, 208, 209, 210, 211, 212, + 213, 214, 215, 216, 217, 218, 219, 220, 221, 200, + 89, 91, 113, 222, 123, 193, 268, 329, 224, 225, + 183, 100, 101, 191, 121, 103, 104, 106, 90, 7, + 85, 234, 266, 182, 55, 183, 328, 86, 192, 123, + 83, 244, 122, 267, 54, 132, 190, 250, 188, 121, + 345, 230, 105, 86, 233, 77, 35, 119, 304, 10, + 185, 327, 86, 303, 293, 294, 157, 315, 297, 79, + 184, 186, 326, 163, 274, 185, 246, 247, 302, 325, + 248, 324, 314, 162, 323, 184, 186, 299, 261, 313, + 322, 235, 237, 239, 240, 241, 249, 251, 254, 255, + 256, 257, 258, 262, 263, 164, 321, 236, 238, 242, + 243, 245, 252, 253, 180, 126, 320, 259, 260, 179, + 125, 165, 305, 319, 306, 308, 318, 310, 317, 130, + 88, 129, 178, 124, 311, 312, 137, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, + 151, 195, 160, 161, 50, 163, 36, 167, 198, 331, + 78, 332, 201, 228, 55, 162, 85, 227, 1, 340, + 341, 342, 336, 49, 54, 343, 83, 347, 346, 349, + 348, 48, 226, 47, 81, 355, 356, 164, 55, 86, + 357, 53, 77, 301, 56, 8, 359, 22, 54, 37, + 55, 175, 46, 165, 57, 128, 135, 127, 45, 43, + 54, 364, 300, 59, 133, 174, 9, 9, 42, 134, + 75, 41, 40, 51, 196, 358, 18, 19, 278, 87, + 20, 194, 229, 80, 350, 156, 76, 58, 232, 52, + 118, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 0, 0, 0, 13, 0, + 0, 0, 24, 0, 30, 0, 0, 31, 32, 55, + 38, 0, 53, 77, 0, 56, 280, 0, 22, 54, + 0, 0, 0, 279, 0, 57, 0, 283, 284, 282, + 289, 291, 288, 290, 285, 286, 287, 292, 0, 0, + 0, 75, 0, 0, 0, 0, 0, 18, 19, 0, + 0, 20, 0, 0, 0, 0, 0, 76, 0, 0, + 0, 0, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 0, 0, 0, 13, + 
0, 0, 0, 24, 0, 30, 0, 55, 31, 32, + 53, 77, 0, 56, 335, 0, 22, 54, 0, 0, + 0, 0, 0, 57, 0, 283, 284, 282, 289, 291, + 288, 290, 285, 286, 287, 292, 0, 0, 0, 75, + 0, 0, 0, 0, 0, 18, 19, 0, 0, 20, + 0, 0, 0, 17, 77, 76, 0, 0, 0, 22, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 0, 0, 0, 13, 0, 0, + 0, 24, 0, 30, 0, 0, 31, 32, 18, 19, + 0, 0, 20, 0, 0, 0, 17, 35, 0, 0, + 0, 0, 22, 11, 12, 14, 15, 16, 21, 23, + 25, 26, 27, 28, 29, 33, 34, 0, 0, 0, + 13, 0, 0, 0, 24, 0, 30, 0, 0, 31, + 32, 18, 19, 0, 0, 20, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 11, 12, 14, 15, + 16, 21, 23, 25, 26, 27, 28, 29, 33, 34, + 107, 0, 0, 13, 0, 0, 0, 24, 177, 30, + 0, 0, 31, 32, 0, 0, 0, 0, 0, 107, + 0, 0, 0, 0, 0, 0, 0, 89, 91, 92, + 0, 93, 94, 95, 96, 97, 98, 99, 100, 101, + 102, 0, 103, 104, 106, 90, 89, 91, 92, 0, + 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, + 0, 103, 104, 106, 90, 107, 0, 0, 0, 105, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 107, 0, 0, 0, 105, 0, + 0, 0, 89, 91, 92, 0, 93, 94, 95, 0, + 97, 98, 99, 100, 101, 102, 0, 103, 104, 106, + 90, 89, 91, 92, 0, 93, 94, 0, 0, 97, + 98, 0, 100, 101, 102, 0, 103, 104, 106, 90, 0, 0, 0, 0, 105, 0, 0, 0, 0, 0, - 0, 0, 87, 89, 90, 0, 91, 92, 93, 94, - 95, 96, 97, 98, 99, 100, 0, 101, 102, 104, - 88, 87, 89, 90, 0, 91, 92, 93, 94, 95, - 96, 97, 98, 99, 100, 0, 101, 102, 104, 88, - 105, 0, 0, 0, 103, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 105, - 0, 0, 0, 103, 0, 0, 0, 87, 89, 90, - 0, 91, 92, 93, 0, 95, 96, 97, 98, 99, - 100, 0, 101, 102, 104, 88, 87, 89, 90, 0, - 91, 92, 0, 0, 95, 96, 0, 98, 99, 100, - 0, 101, 102, 104, 88, 0, 0, 0, 0, 103, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 103, + 0, 0, 0, 105, } var yyPact = [...]int16{ - 28, 102, 569, 569, 405, 526, -1000, -1000, -1000, 98, + 31, 169, 574, 574, 410, 531, -1000, -1000, -1000, 193, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 342, -1000, 204, -1000, 650, + -1000, -1000, -1000, -1000, -1000, 314, -1000, 278, -1000, 655, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 21, 93, -1000, -1000, 483, -1000, 483, 97, + -1000, -1000, 57, 147, -1000, -1000, 488, -1000, 488, 192, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 307, -1000, -1000, - 338, -1000, -1000, 225, -1000, 23, -1000, -44, -44, -44, - -44, -44, -44, -44, -44, -44, -44, -44, -44, -44, - -44, -44, -44, 47, 171, 259, 93, -57, -1000, 249, - 249, 324, -1000, 631, 75, -1000, 327, -1000, -1000, 222, - 130, -1000, -1000, -1000, 298, -1000, 112, -1000, 159, 483, - -1000, -58, -48, -1000, 483, 483, 483, 483, 483, 483, - 483, 483, 483, 483, 483, 483, 483, 483, 483, -1000, - 39, -1000, -1000, 90, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 36, 36, 229, -1000, -1000, -1000, -1000, 174, -1000, - -1000, 180, -1000, 650, -1000, -1000, 301, -1000, 105, -1000, - -1000, -1000, -1000, -1000, 44, -1000, -1000, -1000, -1000, -1000, - 18, 157, 83, -1000, -1000, -1000, 404, 15, 249, 249, - 249, 249, 75, 75, 402, 402, 402, 715, 696, 402, - 402, 715, 75, 75, 402, 75, 15, -1000, 19, -1000, - -1000, -1000, 186, -1000, 155, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 187, -1000, -1000, + 263, -1000, -1000, 353, 277, -1000, -1000, 29, -1000, -53, + -53, -53, -53, -53, -53, -53, -53, -53, -53, -53, + -53, -53, -53, -53, -53, 26, 214, 305, 147, -56, + -1000, 
126, 126, 329, -1000, 636, 24, -1000, 262, -1000, + -1000, 181, 166, -1000, -1000, 178, -1000, 171, -1000, 163, + -1000, 296, 488, -1000, -58, -50, -1000, 488, 488, 488, + 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, + 488, 488, -1000, 175, -1000, -1000, 111, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 115, 115, 311, -1000, -1000, -1000, + -1000, 179, -1000, -1000, 64, -1000, 655, -1000, -1000, 162, + -1000, 141, -1000, -1000, -1000, -1000, -1000, 32, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 25, 80, 17, -1000, -1000, + -1000, 409, 8, 126, 126, 126, 126, 24, 24, 119, + 119, 119, 720, 701, 119, 119, 720, 24, 24, 119, + 24, 8, -1000, 40, -1000, -1000, -1000, 341, -1000, 206, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 483, -1000, -1000, -1000, -1000, -1000, -1000, 31, 31, 17, - 31, 37, 37, 206, 34, -1000, -1000, 197, 191, 188, - 185, 164, 161, 153, 133, 113, 111, 110, -1000, -1000, - -1000, -1000, -1000, -1000, 101, -1000, -1000, -1000, 13, -1000, - 650, -1000, -1000, -1000, 31, -1000, 16, 11, 482, -1000, - -1000, -1000, 33, 163, 163, 163, 36, 40, 40, 33, - 40, 33, -74, -1000, -1000, -1000, -1000, -1000, 31, 31, - -1000, -1000, -1000, 31, -1000, -1000, -1000, -1000, -1000, -1000, - 163, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 38, -1000, 160, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 488, -1000, -1000, -1000, -1000, + -1000, -1000, 56, 56, 18, 56, 72, 72, 215, 70, + -1000, -1000, 272, 270, 267, 260, 250, 234, 228, 225, + 223, 216, 205, -1000, -1000, -1000, -1000, -1000, -1000, 165, + -1000, -1000, -1000, 30, -1000, 655, -1000, -1000, -1000, 56, + -1000, 14, 13, 487, -1000, -1000, -1000, 22, 27, 27, + 27, 115, 186, 186, 22, 186, 22, -74, -1000, -1000, + -1000, -1000, -1000, 56, 56, -1000, -1000, -1000, 56, -1000, + -1000, -1000, -1000, -1000, -1000, 27, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 52, -1000, + 28, -1000, -1000, -1000, -1000, } var yyPgo = [...]int16{ - 0, 368, 12, 367, 5, 14, 366, 298, 364, 363, - 361, 360, 265, 211, 359, 13, 357, 10, 11, 355, - 353, 7, 352, 8, 4, 351, 2, 1, 3, 350, - 27, 0, 348, 338, 17, 193, 328, 312, 6, 311, - 308, 16, 307, 39, 297, 9, 281, 274, 273, 271, - 234, 218, 299, 163, 161, + 0, 390, 13, 389, 5, 15, 388, 363, 387, 385, + 12, 384, 209, 345, 383, 14, 382, 10, 11, 381, + 379, 7, 378, 8, 4, 375, 2, 1, 3, 374, + 27, 0, 373, 372, 17, 195, 371, 369, 6, 368, + 365, 16, 364, 56, 359, 9, 358, 356, 352, 333, + 331, 323, 304, 318, 306, } var yyR1 = [...]int8{ @@ -554,18 +556,18 @@ var yyR1 = [...]int8{ 13, 13, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 12, 12, 12, 12, - 14, 14, 14, 15, 15, 15, 15, 54, 20, 20, - 20, 20, 19, 19, 19, 19, 19, 19, 19, 19, - 19, 29, 29, 29, 21, 21, 21, 21, 22, 22, - 22, 23, 23, 23, 23, 23, 23, 23, 23, 23, - 23, 23, 24, 24, 25, 25, 25, 11, 11, 11, - 11, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 6, 6, 6, 6, 6, + 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, + 54, 20, 20, 20, 20, 19, 19, 19, 19, 19, + 19, 19, 19, 19, 29, 29, 29, 21, 21, 21, + 21, 22, 22, 22, 23, 23, 23, 23, 23, 23, + 23, 23, 23, 23, 23, 24, 24, 25, 25, 25, + 11, 11, 11, 11, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 8, 8, 5, 5, 5, 5, - 45, 45, 28, 28, 30, 30, 31, 31, 27, 
26, - 26, 49, 10, 18, 18, + 6, 6, 6, 6, 6, 6, 6, 8, 8, 5, + 5, 5, 5, 45, 45, 28, 28, 30, 30, 31, + 31, 27, 26, 26, 49, 10, 18, 18, } var yyR2 = [...]int8{ @@ -582,18 +584,18 @@ var yyR2 = [...]int8{ 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 4, 2, 0, - 3, 1, 2, 3, 3, 2, 1, 2, 0, 3, - 2, 1, 1, 3, 1, 3, 4, 1, 3, 5, - 5, 1, 1, 1, 4, 3, 3, 2, 3, 1, - 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 4, 3, 3, 1, 2, 1, 1, 1, + 3, 1, 2, 3, 3, 3, 3, 2, 2, 1, + 2, 0, 3, 2, 1, 1, 3, 1, 3, 4, + 1, 3, 5, 5, 1, 1, 1, 4, 3, 3, + 2, 3, 1, 2, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 4, 3, 3, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, - 1, 1, 1, 0, 1, + 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, + 1, 1, 2, 1, 1, 1, 0, 1, } var yyChk = [...]int16{ @@ -605,34 +607,35 @@ var yyChk = [...]int16{ -52, -32, -3, 12, 19, 9, 15, 25, -8, -7, -43, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 41, 57, 13, -52, -12, - -14, 20, -15, 12, 2, -20, 2, 41, 59, 42, - 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, - 54, 56, 57, 83, 58, 14, -34, -41, 2, 79, - 85, 15, -41, -38, -38, -43, -1, 20, -2, 12, - -10, 2, 25, 20, 7, 2, 4, 2, 24, -35, - -42, -37, -47, 78, -35, -35, -35, -35, -35, -35, - -35, -35, -35, -35, -35, -35, -35, -35, -35, -45, - 57, 2, -31, -9, 2, -28, -30, 88, 89, 19, - 9, 41, 57, -45, 2, -41, -34, -17, 15, 2, - -17, -40, 22, -38, 22, 20, 7, 2, -5, 2, - 4, 54, 44, 55, -5, 20, -15, 25, 2, -19, - 5, -29, -21, 12, -28, -30, 16, -38, 82, 84, - 80, 81, -38, -38, -38, -38, -38, -38, -38, -38, - -38, -38, -38, -38, -38, -38, -38, -45, 15, -28, - -28, 21, 6, 2, -16, 22, -4, -6, 25, 2, - 62, 78, 63, 79, 64, 65, 66, 80, 81, 12, - 82, 47, 48, 51, 67, 18, 68, 83, 84, 69, - 70, 71, 72, 73, 88, 89, 59, 74, 75, 22, - 7, 20, -2, 25, 2, 25, 2, 26, 26, -30, - 26, 41, 57, -22, 24, 17, -23, 30, 28, 29, - 35, 36, 37, 33, 31, 34, 32, 38, -17, -17, - -18, -17, -18, 22, -45, 21, 2, 22, 7, 2, - -38, -27, 19, -27, 26, -27, -21, -21, 24, 17, - 2, 17, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 21, 2, 22, -4, -27, 26, 26, - 17, -23, -26, 57, -27, -31, -31, -31, -28, -24, - 14, -24, -26, -24, -26, -11, 92, 93, 94, 95, - -27, -27, -27, -25, -31, 24, 21, 2, 21, -31, + -14, 20, -15, 12, -10, 2, 25, -20, 2, 41, + 59, 42, 43, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 56, 57, 83, 58, 14, -34, -41, + 2, 79, 85, 15, -41, -38, -38, -43, -1, 20, + -2, 12, -10, 2, 20, 7, 2, 4, 2, 4, + 2, 24, -35, -42, -37, -47, 78, -35, -35, -35, + -35, -35, -35, -35, -35, -35, -35, -35, -35, -35, + -35, -35, -45, 57, 2, -31, -9, 2, -28, -30, + 88, 89, 19, 9, 41, 57, -45, 2, -41, -34, + -17, 15, 2, -17, -40, 22, -38, 22, 20, 7, + 2, -5, 2, 4, 54, 44, 55, -5, 20, -15, + 25, 2, 25, 2, -19, 5, -29, -21, 12, -28, + -30, 16, -38, 82, 84, 80, 81, -38, -38, -38, + -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, + -38, -38, -45, 15, -28, -28, 21, 6, 2, -16, + 22, -4, -6, 25, 2, 62, 78, 63, 79, 64, + 65, 66, 80, 81, 12, 82, 47, 48, 51, 67, + 18, 68, 83, 84, 69, 70, 71, 72, 73, 88, + 89, 59, 74, 75, 22, 7, 20, -2, 25, 2, + 25, 2, 26, 26, -30, 26, 41, 57, -22, 24, + 17, -23, 30, 28, 29, 35, 36, 37, 33, 31, + 34, 32, 38, -17, -17, -18, -17, -18, 22, -45, + 21, 2, 22, 7, 2, -38, -27, 19, -27, 26, + -27, -21, -21, 24, 17, 2, 17, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 21, 2, + 22, -4, -27, 26, 26, 17, -23, -26, 57, -27, + -31, -31, -31, -28, -24, 14, -24, -26, -24, -26, + -11, 92, 93, 94, 95, -27, -27, -27, -25, -31, + 24, 21, 2, 21, -31, } 
var yyDef = [...]int16{ @@ -641,37 +644,38 @@ var yyDef = [...]int16{ 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 0, 2, -2, 3, 4, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, - 18, 19, 0, 108, 230, 231, 0, 241, 0, 85, + 18, 19, 0, 108, 233, 234, 0, 244, 0, 85, 86, -2, -2, -2, -2, -2, -2, -2, -2, -2, - -2, -2, -2, -2, -2, 224, 225, 0, 5, 100, - 0, 128, 131, 0, 136, 137, 141, 43, 43, 43, + -2, -2, -2, -2, -2, 227, 228, 0, 5, 100, + 0, 128, 131, 0, 0, 139, 245, 140, 144, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, - 43, 43, 43, 0, 0, 0, 0, 22, 23, 0, - 0, 0, 61, 0, 83, 84, 0, 89, 91, 0, - 95, 99, 242, 126, 0, 132, 0, 135, 140, 0, - 42, 47, 48, 44, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 68, - 0, 70, 71, 0, 73, 236, 237, 74, 75, 232, - 233, 0, 0, 0, 82, 20, 21, 24, 0, 54, - 25, 0, 63, 65, 67, 87, 0, 92, 0, 98, - 226, 227, 228, 229, 0, 127, 130, 133, 134, 139, - 142, 144, 147, 151, 152, 153, 0, 26, 0, 0, - -2, -2, 27, 28, 29, 30, 31, 32, 33, 34, - 35, 36, 37, 38, 39, 40, 41, 69, 0, 234, - 235, 76, 0, 81, 0, 53, 56, 58, 59, 60, - 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, - 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, - 215, 216, 217, 218, 219, 220, 221, 222, 223, 62, - 66, 88, 90, 93, 97, 94, 96, 0, 0, 0, - 0, 0, 0, 0, 0, 157, 159, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 45, 46, - 49, 244, 50, 72, 0, 78, 80, 51, 0, 57, - 64, 143, 238, 145, 0, 148, 0, 0, 0, 155, - 160, 156, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 77, 79, 52, 55, 146, 0, 0, - 154, 158, 161, 0, 240, 162, 163, 164, 165, 166, - 0, 167, 168, 169, 170, 171, 177, 178, 179, 180, - 149, 150, 239, 0, 175, 0, 173, 176, 172, 174, + 43, 43, 43, 43, 43, 0, 0, 0, 0, 22, + 23, 0, 0, 0, 61, 0, 83, 84, 0, 89, + 91, 0, 95, 99, 126, 0, 132, 0, 137, 0, + 138, 143, 0, 42, 47, 48, 44, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 68, 0, 70, 71, 0, 73, 239, 240, + 74, 75, 235, 236, 0, 0, 0, 82, 20, 21, + 24, 0, 54, 25, 0, 63, 65, 67, 87, 0, + 92, 0, 98, 229, 230, 231, 232, 0, 127, 130, + 133, 135, 134, 136, 142, 145, 147, 150, 154, 155, + 156, 0, 26, 0, 0, -2, -2, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 69, 0, 237, 238, 76, 0, 81, 0, + 53, 56, 58, 59, 60, 198, 199, 200, 201, 202, + 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, + 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, + 223, 224, 225, 226, 62, 66, 88, 90, 93, 97, + 94, 96, 0, 0, 0, 0, 0, 0, 0, 0, + 160, 162, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 45, 46, 49, 247, 50, 72, 0, + 78, 80, 51, 0, 57, 64, 146, 241, 148, 0, + 151, 0, 0, 0, 158, 163, 159, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 77, 79, + 52, 55, 149, 0, 0, 157, 161, 164, 0, 243, + 165, 166, 167, 168, 169, 0, 170, 171, 172, 173, + 174, 180, 181, 182, 183, 152, 153, 242, 0, 178, + 0, 176, 179, 175, 177, } var yyTok1 = [...]int8{ @@ -1614,24 +1618,41 @@ yydefault: yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)} } case 134: + yyDollar = yyS[yypt-3 : yypt+1] + { + yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)} + } + case 135: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("label set", "string") yyVAL.label = labels.Label{} } - case 135: + case 136: + yyDollar = yyS[yypt-3 : yypt+1] + { + yylex.(*parser).unexpected("label set", "string") + yyVAL.label = labels.Label{} + } + case 137: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label set", "\"=\"") yyVAL.label = labels.Label{} } - case 136: + case 138: + yyDollar = 
yyS[yypt-2 : yypt+1] + { + yylex.(*parser).unexpected("label set", "\"=\"") + yyVAL.label = labels.Label{} + } + case 139: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("label set", "identifier or \"}\"") yyVAL.label = labels.Label{} } - case 137: + case 140: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).generatedParserResult = &seriesDescription{ @@ -1639,33 +1660,33 @@ yydefault: values: yyDollar[2].series, } } - case 138: + case 141: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.series = []SequenceValue{} } - case 139: + case 142: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = append(yyDollar[1].series, yyDollar[3].series...) } - case 140: + case 143: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.series = yyDollar[1].series } - case 141: + case 144: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("series values", "") yyVAL.series = nil } - case 142: + case 145: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Omitted: true}} } - case 143: + case 146: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1673,12 +1694,12 @@ yydefault: yyVAL.series = append(yyVAL.series, SequenceValue{Omitted: true}) } } - case 144: + case 147: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Value: yyDollar[1].float}} } - case 145: + case 148: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1687,7 +1708,7 @@ yydefault: yyVAL.series = append(yyVAL.series, SequenceValue{Value: yyDollar[1].float}) } } - case 146: + case 149: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1697,12 +1718,12 @@ yydefault: yyDollar[1].float += yyDollar[2].float } } - case 147: + case 150: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Histogram: yyDollar[1].histogram}} } - case 148: + case 151: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1712,7 +1733,7 @@ yydefault: //$1 += $2 } } - case 149: + case 152: yyDollar = yyS[yypt-5 : yypt+1] { val, err := yylex.(*parser).histogramsIncreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint) @@ -1721,7 +1742,7 @@ yydefault: } yyVAL.series = val } - case 150: + case 153: yyDollar = yyS[yypt-5 : yypt+1] { val, err := yylex.(*parser).histogramsDecreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint) @@ -1730,7 +1751,7 @@ yydefault: } yyVAL.series = val } - case 151: + case 154: yyDollar = yyS[yypt-1 : yypt+1] { if yyDollar[1].item.Val != "stale" { @@ -1738,130 +1759,130 @@ yydefault: } yyVAL.float = math.Float64frombits(value.StaleNaN) } - case 154: + case 157: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) } - case 155: + case 158: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) } - case 156: + case 159: yyDollar = yyS[yypt-3 : yypt+1] { m := yylex.(*parser).newMap() yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) } - case 157: + case 160: yyDollar = yyS[yypt-2 : yypt+1] { m := yylex.(*parser).newMap() yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) } - case 158: + case 161: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = *(yylex.(*parser).mergeMaps(&yyDollar[1].descriptors, &yyDollar[3].descriptors)) } - case 159: + case 162: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.descriptors = yyDollar[1].descriptors } - case 160: + case 163: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("histogram description", 
"histogram description key, e.g. buckets:[5 10 7]") } - case 161: + case 164: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["schema"] = yyDollar[3].int } - case 162: + case 165: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["sum"] = yyDollar[3].float } - case 163: + case 166: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["count"] = yyDollar[3].float } - case 164: + case 167: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["z_bucket"] = yyDollar[3].float } - case 165: + case 168: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float } - case 166: + case 169: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set } - case 167: + case 170: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set } - case 168: + case 171: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["offset"] = yyDollar[3].int } - case 169: + case 172: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set } - case 170: + case 173: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["n_offset"] = yyDollar[3].int } - case 171: + case 174: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["counter_reset_hint"] = yyDollar[3].item } - case 172: + case 175: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.bucket_set = yyDollar[2].bucket_set } - case 173: + case 176: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.bucket_set = yyDollar[2].bucket_set } - case 174: + case 177: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float) } - case 175: + case 178: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.bucket_set = []float64{yyDollar[1].float} } - case 230: + case 233: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &NumberLiteral{ @@ -1869,7 +1890,7 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 231: + case 234: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -1883,12 +1904,12 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 232: + case 235: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val) } - case 233: + case 236: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -1899,17 +1920,17 @@ yydefault: } yyVAL.float = dur.Seconds() } - case 234: + case 237: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.float = yyDollar[2].float } - case 235: + case 238: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.float = -yyDollar[2].float } - case 238: + case 241: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -1918,17 +1939,17 @@ yydefault: yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err) } } - case 239: + case 242: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.int = -int64(yyDollar[2].uint) } - case 240: + case 243: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.int = int64(yyDollar[1].uint) } - case 241: + case 244: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &StringLiteral{ @@ -1936,7 +1957,7 @@ yydefault: PosRange: 
yyDollar[1].item.PositionRange(), } } - case 242: + case 245: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.item = Item{ @@ -1945,7 +1966,7 @@ yydefault: Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val), } } - case 243: + case 246: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.strings = nil diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index 11cabd971dc..0e5e2f638bb 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -4006,8 +4006,7 @@ func TestParseExpressions(t *testing.T) { require.Equal(t, expected, expr, "error on input '%s'", test.input) } else { - require.Error(t, err) - require.Contains(t, err.Error(), test.errMsg, "unexpected error on input '%s', expected '%s', got '%s'", test.input, test.errMsg, err.Error()) + require.ErrorContains(t, err, test.errMsg, "unexpected error on input '%s', expected '%s', got '%s'", test.input, test.errMsg, err.Error()) var errorList ParseErrors ok := errors.As(err, &errorList) @@ -4482,6 +4481,22 @@ func TestHistogramTestExpression(t *testing.T) { }, expected: `{{offset:-3 buckets:[5.1 0 0 0 0 10 7] n_offset:-1 n_buckets:[4.1 5 0 0 7 8 9]}}`, }, + { + name: "known counter reset hint", + input: histogram.FloatHistogram{ + Schema: 1, + Sum: -0.3, + Count: 3.1, + ZeroCount: 7.1, + ZeroThreshold: 0.05, + PositiveBuckets: []float64{5.1, 10, 7}, + PositiveSpans: []histogram.Span{{Offset: -3, Length: 3}}, + NegativeBuckets: []float64{4.1, 5}, + NegativeSpans: []histogram.Span{{Offset: -5, Length: 2}}, + CounterResetHint: histogram.CounterReset, + }, + expected: `{{schema:1 count:3.1 sum:-0.3 z_bucket:7.1 z_bucket_w:0.05 counter_reset_hint:reset offset:-3 buckets:[5.1 10 7] n_offset:-5 n_buckets:[4.1 5]}}`, + }, } { t.Run(test.name, func(t *testing.T) { expression := test.input.TestExpression() @@ -4533,7 +4548,7 @@ func TestRecoverParserError(t *testing.T) { e := errors.New("custom error") defer func() { - require.Equal(t, e.Error(), err.Error()) + require.EqualError(t, err, e.Error()) }() defer p.recover(&err) diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go index ff709e44268..e078bcb60bb 100644 --- a/promql/promqltest/test.go +++ b/promql/promqltest/test.go @@ -39,6 +39,7 @@ import ( "github.com/prometheus/prometheus/promql/parser/posrange" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/util/almost" + "github.com/prometheus/prometheus/util/convertnhcb" "github.com/prometheus/prometheus/util/teststorage" "github.com/prometheus/prometheus/util/testutil" ) @@ -46,8 +47,8 @@ import ( var ( patSpace = regexp.MustCompile("[\t ]+") patLoad = regexp.MustCompile(`^load(?:_(with_nhcb))?\s+(.+?)$`) - patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|warn|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`) - patEvalRange = regexp.MustCompile(`^eval(?:_(fail|warn))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`) + patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|warn|ordered|info))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`) + patEvalRange = regexp.MustCompile(`^eval(?:_(fail|warn|info))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`) ) const ( @@ -321,6 +322,8 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) { cmd.fail = true case "warn": cmd.warn = true + case "info": + cmd.info = true } for j := 1; i+1 < len(lines); j++ { @@ -477,43 +480,22 @@ func (cmd *loadCmd) append(a storage.Appender) error { return nil } -func getHistogramMetricBase(m labels.Labels, suffix string) (labels.Labels, uint64) { - mName := 
m.Get(labels.MetricName) - baseM := labels.NewBuilder(m). - Set(labels.MetricName, strings.TrimSuffix(mName, suffix)). - Del(labels.BucketLabel). - Labels() - hash := baseM.Hash() - return baseM, hash -} - type tempHistogramWrapper struct { metric labels.Labels upperBounds []float64 - histogramByTs map[int64]tempHistogram + histogramByTs map[int64]convertnhcb.TempHistogram } func newTempHistogramWrapper() tempHistogramWrapper { return tempHistogramWrapper{ upperBounds: []float64{}, - histogramByTs: map[int64]tempHistogram{}, - } -} - -type tempHistogram struct { - bucketCounts map[float64]float64 - count float64 - sum float64 -} - -func newTempHistogram() tempHistogram { - return tempHistogram{ - bucketCounts: map[float64]float64{}, + histogramByTs: map[int64]convertnhcb.TempHistogram{}, } } -func processClassicHistogramSeries(m labels.Labels, suffix string, histogramMap map[uint64]tempHistogramWrapper, smpls []promql.Sample, updateHistogramWrapper func(*tempHistogramWrapper), updateHistogram func(*tempHistogram, float64)) { - m2, m2hash := getHistogramMetricBase(m, suffix) +func processClassicHistogramSeries(m labels.Labels, suffix string, histogramMap map[uint64]tempHistogramWrapper, smpls []promql.Sample, updateHistogramWrapper func(*tempHistogramWrapper), updateHistogram func(*convertnhcb.TempHistogram, float64)) { + m2 := convertnhcb.GetHistogramMetricBase(m, suffix) + m2hash := m2.Hash() histogramWrapper, exists := histogramMap[m2hash] if !exists { histogramWrapper = newTempHistogramWrapper() @@ -528,7 +510,7 @@ func processClassicHistogramSeries(m labels.Labels, suffix string, histogramMap } histogram, exists := histogramWrapper.histogramByTs[s.T] if !exists { - histogram = newTempHistogram() + histogram = convertnhcb.NewTempHistogram() } updateHistogram(&histogram, s.F) histogramWrapper.histogramByTs[s.T] = histogram @@ -536,34 +518,6 @@ func processClassicHistogramSeries(m labels.Labels, suffix string, histogramMap histogramMap[m2hash] = histogramWrapper } -func processUpperBoundsAndCreateBaseHistogram(upperBounds0 []float64) ([]float64, *histogram.FloatHistogram) { - sort.Float64s(upperBounds0) - upperBounds := make([]float64, 0, len(upperBounds0)) - prevLE := math.Inf(-1) - for _, le := range upperBounds0 { - if le != prevLE { // deduplicate - upperBounds = append(upperBounds, le) - prevLE = le - } - } - var customBounds []float64 - if upperBounds[len(upperBounds)-1] == math.Inf(1) { - customBounds = upperBounds[:len(upperBounds)-1] - } else { - customBounds = upperBounds - } - return upperBounds, &histogram.FloatHistogram{ - Count: 0, - Sum: 0, - Schema: histogram.CustomBucketsSchema, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: uint32(len(upperBounds))}, - }, - PositiveBuckets: make([]float64, len(upperBounds)), - CustomValues: customBounds, - } -} - // If classic histograms are defined, convert them into native histograms with custom // bounds and append the defined time series to the storage. 
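The removed local helpers above (getHistogramMetricBase, tempHistogram, processUpperBoundsAndCreateBaseHistogram) now live in util/convertnhcb, which this file imports. The core transformation is unchanged: classic histogram samples carry cumulative counts per le bound, while a custom-bucket native histogram stores per-bucket counts, so the loader sorts the bounds and takes successive differences, keeping a trailing +Inf bucket but leaving it out of the custom values. A standalone sketch of just that step, independent of the convertnhcb API (names are illustrative):

package main

import (
	"fmt"
	"math"
	"sort"
)

// cumulativeToBuckets turns classic cumulative bucket counts (keyed by their
// "le" bound) into per-bucket counts in ascending bound order. A trailing +Inf
// bucket keeps its count but is dropped from the returned bounds, mirroring
// how the custom values of the native histogram are derived.
func cumulativeToBuckets(cumulative map[float64]float64) (bounds, counts []float64) {
	for le := range cumulative {
		bounds = append(bounds, le)
	}
	sort.Float64s(bounds)

	prev := 0.0
	for _, le := range bounds {
		c := cumulative[le]
		counts = append(counts, c-prev) // cumulative -> per-bucket delta
		prev = c
	}
	if n := len(bounds); n > 0 && math.IsInf(bounds[n-1], +1) {
		bounds = bounds[:n-1] // +Inf is implicit in custom-bucket histograms
	}
	return bounds, counts
}

func main() {
	bounds, counts := cumulativeToBuckets(map[float64]float64{
		0.1: 2, 0.5: 5, 1: 9, math.Inf(1): 10,
	})
	fmt.Println(bounds, counts) // [0.1 0.5 1] [2 3 4 1]
}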
func (cmd *loadCmd) appendCustomHistogram(a storage.Appender) error { @@ -582,16 +536,16 @@ func (cmd *loadCmd) appendCustomHistogram(a storage.Appender) error { } processClassicHistogramSeries(m, "_bucket", histogramMap, smpls, func(histogramWrapper *tempHistogramWrapper) { histogramWrapper.upperBounds = append(histogramWrapper.upperBounds, le) - }, func(histogram *tempHistogram, f float64) { - histogram.bucketCounts[le] = f + }, func(histogram *convertnhcb.TempHistogram, f float64) { + histogram.BucketCounts[le] = f }) case strings.HasSuffix(mName, "_count"): - processClassicHistogramSeries(m, "_count", histogramMap, smpls, nil, func(histogram *tempHistogram, f float64) { - histogram.count = f + processClassicHistogramSeries(m, "_count", histogramMap, smpls, nil, func(histogram *convertnhcb.TempHistogram, f float64) { + histogram.Count = f }) case strings.HasSuffix(mName, "_sum"): - processClassicHistogramSeries(m, "_sum", histogramMap, smpls, nil, func(histogram *tempHistogram, f float64) { - histogram.sum = f + processClassicHistogramSeries(m, "_sum", histogramMap, smpls, nil, func(histogram *convertnhcb.TempHistogram, f float64) { + histogram.Sum = f }) } } @@ -599,30 +553,21 @@ func (cmd *loadCmd) appendCustomHistogram(a storage.Appender) error { // Convert the collated classic histogram data into native histograms // with custom bounds and append them to the storage. for _, histogramWrapper := range histogramMap { - upperBounds, fhBase := processUpperBoundsAndCreateBaseHistogram(histogramWrapper.upperBounds) + upperBounds, hBase := convertnhcb.ProcessUpperBoundsAndCreateBaseHistogram(histogramWrapper.upperBounds, true) + fhBase := hBase.ToFloat(nil) samples := make([]promql.Sample, 0, len(histogramWrapper.histogramByTs)) for t, histogram := range histogramWrapper.histogramByTs { - fh := fhBase.Copy() - var prevCount, total float64 - for i, le := range upperBounds { - currCount, exists := histogram.bucketCounts[le] - if !exists { - currCount = 0 + h, fh := convertnhcb.NewHistogram(histogram, upperBounds, hBase, fhBase) + if fh == nil { + if err := h.Validate(); err != nil { + return err } - count := currCount - prevCount - fh.PositiveBuckets[i] = count - total += count - prevCount = currCount - } - fh.Sum = histogram.sum - if histogram.count != 0 { - total = histogram.count + fh = h.ToFloat(nil) } - fh.Count = total - s := promql.Sample{T: t, H: fh.Compact(0)} - if err := s.H.Validate(); err != nil { + if err := fh.Validate(); err != nil { return err } + s := promql.Sample{T: t, H: fh} samples = append(samples, s) } sort.Slice(samples, func(i, j int) bool { return samples[i].T < samples[j].T }) @@ -657,10 +602,10 @@ type evalCmd struct { step time.Duration line int - isRange bool // if false, instant query - fail, warn, ordered bool - expectedFailMessage string - expectedFailRegexp *regexp.Regexp + isRange bool // if false, instant query + fail, warn, ordered, info bool + expectedFailMessage string + expectedFailRegexp *regexp.Regexp metrics map[uint64]labels.Labels expectScalar bool @@ -790,7 +735,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error { } if !compareNativeHistogram(expected.H.Compact(0), actual.H.Compact(0)) { - return fmt.Errorf("expected histogram value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], expected.H, actual.H, formatSeriesResult(s)) + return fmt.Errorf("expected histogram value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], 
expected.H.TestExpression(), actual.H.TestExpression(), formatSeriesResult(s)) } } } @@ -1006,7 +951,13 @@ func formatSeriesResult(s promql.Series) string { histogramPlural = "" } - return fmt.Sprintf("%v float point%s %v and %v histogram point%s %v", len(s.Floats), floatPlural, s.Floats, len(s.Histograms), histogramPlural, s.Histograms) + histograms := make([]string, 0, len(s.Histograms)) + + for _, p := range s.Histograms { + histograms = append(histograms, fmt.Sprintf("%v @[%v]", p.H.TestExpression(), p.T)) + } + + return fmt.Sprintf("%v float point%s %v and %v histogram point%s %v", len(s.Floats), floatPlural, s.Floats, len(s.Histograms), histogramPlural, histograms) } // HistogramTestExpression returns TestExpression() for the given histogram or "" if the histogram is nil. @@ -1202,13 +1153,16 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq if res.Err == nil && cmd.fail { return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line) } - countWarnings, _ := res.Warnings.CountWarningsAndInfo() + countWarnings, countInfo := res.Warnings.CountWarningsAndInfo() if !cmd.warn && countWarnings > 0 { return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings) } if cmd.warn && countWarnings == 0 { return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line) } + if cmd.info && countInfo == 0 { + return fmt.Errorf("expected info annotations evaluating query %q (line %d) but got none", iq.expr, cmd.line) + } err = cmd.compareResult(res.Value) if err != nil { return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err) diff --git a/promql/promqltest/test_test.go b/promql/promqltest/test_test.go index 5aff71fb14e..5da924e9a5a 100644 --- a/promql/promqltest/test_test.go +++ b/promql/promqltest/test_test.go @@ -237,7 +237,7 @@ eval instant at 5m sum by (group) (http_requests) load 5m testmetric {{}} -eval instant at 5m testmetric +eval instant at 0m testmetric `, expectedError: `error in eval testmetric (line 5): unexpected metric {__name__="testmetric"} in result, has value {count:0, sum:0}`, }, @@ -381,7 +381,7 @@ load 5m eval range from 0 to 10m step 5m testmetric testmetric {{schema:-1 sum:4 count:1 buckets:[1] offset:1}} {{schema:-1 sum:7 count:1 buckets:[1] offset:1}} {{schema:-1 sum:8 count:1 buckets:[1] offset:1}} `, - expectedError: `error in eval testmetric (line 5): expected histogram value at index 1 (t=300000) for {__name__="testmetric"} to be {count:1, sum:7, (1,4]:1}, but got {count:1, sum:5, (1,4]:1} (result has 0 float points [] and 3 histogram points [{count:1, sum:4, (1,4]:1} @[0] {count:1, sum:5, (1,4]:1} @[300000] {count:1, sum:6, (1,4]:1} @[600000]])`, + expectedError: `error in eval testmetric (line 5): expected histogram value at index 1 (t=300000) for {__name__="testmetric"} to be {{schema:-1 count:1 sum:7 offset:1 buckets:[1]}}, but got {{schema:-1 count:1 sum:5 counter_reset_hint:not_reset offset:1 buckets:[1]}} (result has 0 float points [] and 3 histogram points [{{schema:-1 count:1 sum:4 offset:1 buckets:[1]}} @[0] {{schema:-1 count:1 sum:5 counter_reset_hint:not_reset offset:1 buckets:[1]}} @[300000] {{schema:-1 count:1 sum:6 counter_reset_hint:not_reset offset:1 buckets:[1]}} @[600000]])`, }, "range query with too many points for query time range": { input: testData + ` @@ -532,7 +532,7 @@ load 5m eval range from 0 to 5m step 5m testmetric testmetric 2 3 `, - expectedError: `error in eval 
testmetric (line 5): expected 2 float points and 0 histogram points for {__name__="testmetric"}, but got 0 float points [] and 2 histogram points [{count:0, sum:0} @[0] {count:0, sum:0} @[300000]]`, + expectedError: `error in eval testmetric (line 5): expected 2 float points and 0 histogram points for {__name__="testmetric"}, but got 0 float points [] and 2 histogram points [{{}} @[0] {{counter_reset_hint:not_reset}} @[300000]]`, }, "range query with expected mixed results": { input: ` @@ -552,7 +552,7 @@ load 5m eval range from 0 to 5m step 5m testmetric testmetric {{}} 3 `, - expectedError: `error in eval testmetric (line 5): expected float value at index 0 for {__name__="testmetric"} to have timestamp 300000, but it had timestamp 0 (result has 1 float point [3 @[0]] and 1 histogram point [{count:0, sum:0} @[300000]])`, + expectedError: `error in eval testmetric (line 5): expected float value at index 0 for {__name__="testmetric"} to have timestamp 300000, but it had timestamp 0 (result has 1 float point [3 @[0]] and 1 histogram point [{{}} @[300000]])`, }, "instant query with expected scalar result": { input: ` diff --git a/promql/promqltest/testdata/aggregators.test b/promql/promqltest/testdata/aggregators.test index 68d2e735b37..e2eb381dbcb 100644 --- a/promql/promqltest/testdata/aggregators.test +++ b/promql/promqltest/testdata/aggregators.test @@ -250,7 +250,7 @@ clear load 5m http_requests{job="api-server", instance="0", group="production"} 0+10x10 http_requests{job="api-server", instance="1", group="production"} 0+20x10 - http_requests{job="api-server", instance="2", group="production"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN + http_requests{job="api-server", instance="2", group="production"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN http_requests{job="api-server", instance="0", group="canary"} 0+30x10 http_requests{job="api-server", instance="1", group="canary"} 0+40x10 http_requests{job="app-server", instance="0", group="production"} 0+50x10 @@ -337,32 +337,32 @@ load 5m version{job="app-server", instance="0", group="canary"} 7 version{job="app-server", instance="1", group="canary"} 7 -eval instant at 5m count_values("version", version) +eval instant at 1m count_values("version", version) {version="6"} 5 {version="7"} 2 {version="8"} 2 -eval instant at 5m count_values(((("version"))), version) +eval instant at 1m count_values(((("version"))), version) {version="6"} 5 {version="7"} 2 {version="8"} 2 -eval instant at 5m count_values without (instance)("version", version) +eval instant at 1m count_values without (instance)("version", version) {job="api-server", group="production", version="6"} 3 {job="api-server", group="canary", version="8"} 2 {job="app-server", group="production", version="6"} 2 {job="app-server", group="canary", version="7"} 2 # Overwrite label with output. Don't do this. -eval instant at 5m count_values without (instance)("job", version) +eval instant at 1m count_values without (instance)("job", version) {job="6", group="production"} 5 {job="8", group="canary"} 2 {job="7", group="canary"} 2 # Overwrite label with output. Don't do this. 
-eval instant at 5m count_values by (job, group)("job", version) +eval instant at 1m count_values by (job, group)("job", version) {job="6", group="production"} 5 {job="8", group="canary"} 2 {job="7", group="canary"} 2 @@ -572,3 +572,160 @@ clear # #eval instant at 1m count(topk(1,max(up) without()) == topk(1,max(up) without()) == topk(1,max(up) without()) == topk(1,max(up) without()) == topk(1,max(up) without())) # {} 1 + +clear + +# Test stddev produces consistent results regardless the order the data is loaded in. +load 5m + series{label="a"} 1 + series{label="b"} 2 + series{label="c"} {{schema:1 sum:15 count:10 buckets:[3 2 5 7 9]}} + +eval instant at 0m stddev(series) + {} 0.5 + +eval instant at 0m stdvar(series) + {} 0.25 + +eval instant at 0m stddev by (label) (series) + {label="a"} 0 + {label="b"} 0 + +eval instant at 0m stdvar by (label) (series) + {label="a"} 0 + {label="b"} 0 + +clear + +load 5m + series{label="a"} {{schema:1 sum:15 count:10 buckets:[3 2 5 7 9]}} + series{label="b"} 1 + series{label="c"} 2 + +eval instant at 0m stddev(series) + {} 0.5 + +eval instant at 0m stdvar(series) + {} 0.25 + +eval instant at 0m stddev by (label) (series) + {label="b"} 0 + {label="c"} 0 + +eval instant at 0m stdvar by (label) (series) + {label="b"} 0 + {label="c"} 0 + +clear + +load 5m + series{label="a"} 1 + series{label="b"} 2 + series{label="c"} NaN + +eval instant at 0m stddev(series) + {} NaN + +eval instant at 0m stdvar(series) + {} NaN + +eval instant at 0m stddev by (label) (series) + {label="a"} 0 + {label="b"} 0 + {label="c"} NaN + +eval instant at 0m stdvar by (label) (series) + {label="a"} 0 + {label="b"} 0 + {label="c"} NaN + +clear + +load 5m + series{label="a"} NaN + series{label="b"} 1 + series{label="c"} 2 + +eval instant at 0m stddev(series) + {} NaN + +eval instant at 0m stdvar(series) + {} NaN + +eval instant at 0m stddev by (label) (series) + {label="a"} NaN + {label="b"} 0 + {label="c"} 0 + +eval instant at 0m stdvar by (label) (series) + {label="a"} NaN + {label="b"} 0 + {label="c"} 0 + +clear + +load 5m + series NaN + +eval instant at 0m stddev(series) + {} NaN + +eval instant at 0m stdvar(series) + {} NaN + +clear + +load 5m + series{label="a"} 1 + series{label="b"} 2 + series{label="c"} inf + +eval instant at 0m stddev (series) + {} NaN + +eval instant at 0m stdvar (series) + {} NaN + +eval instant at 0m stddev by (label) (series) + {label="a"} 0 + {label="b"} 0 + {label="c"} NaN + +eval instant at 0m stdvar by (label) (series) + {label="a"} 0 + {label="b"} 0 + {label="c"} NaN + +clear + +load 5m + series{label="a"} inf + series{label="b"} 1 + series{label="c"} 2 + +eval instant at 0m stddev(series) + {} NaN + +eval instant at 0m stdvar(series) + {} NaN + +eval instant at 0m stddev by (label) (series) + {label="a"} NaN + {label="b"} 0 + {label="c"} 0 + +eval instant at 0m stdvar by (label) (series) + {label="a"} NaN + {label="b"} 0 + {label="c"} 0 + +clear + +load 5m + series inf + +eval instant at 0m stddev(series) + {} NaN + +eval instant at 0m stdvar(series) + {} NaN diff --git a/promql/promqltest/testdata/at_modifier.test b/promql/promqltest/testdata/at_modifier.test index 35f90ee6714..4091f7eabf2 100644 --- a/promql/promqltest/testdata/at_modifier.test +++ b/promql/promqltest/testdata/at_modifier.test @@ -121,45 +121,43 @@ eval instant at 25s sum_over_time(metric{job="1"}[100:1] offset 20 @ 100) # Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries. # Inner most sum=1+2+...+10=55. 
-# With [100s:25s] subquery, it's 55*5. +# With [100s:25s] subquery, it's 55*4. eval instant at 100s sum_over_time(sum_over_time(metric{job="1"}[100s] @ 100)[100s:25s] @ 50) - {job="1"} 275 + {job="1"} 220 # Nested subqueries with different timestamps on both. # Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries. -# Sum of innermost subquery is 275 as above. The outer subquery repeats it 4 times. +# Sum of innermost subquery is 220 as above. The outer subquery repeats it 3 times. eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[100s] @ 100)[100s:25s] @ 50)[3s:1s] @ 3000) - {job="1"} 1100 + {job="1"} 660 # Testing the inner subquery timestamp since vector selector does not have @. # Inner sum for subquery [100s:25s] @ 50 are -# at -50 nothing, at -25 nothing, at 0=0, at 25=2, at 50=4+5=9. -# This sum of 11 is repeated 4 times by outer subquery. +# at -50 nothing, at -25 nothing, at 0=0, at 25=2, at 50=5. +# This sum of 7 is repeated 3 times by outer subquery. eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[10s])[100s:25s] @ 50)[3s:1s] @ 200) - {job="1"} 44 + {job="1"} 21 # Inner sum for subquery [100s:25s] @ 200 are -# at 100=9+10, at 125=12, at 150=14+15, at 175=17, at 200=19+20. -# This sum of 116 is repeated 4 times by outer subquery. +# at 125=12, at 150=15, at 175=17, at 200=20. +# This sum of 64 is repeated 3 times by outer subquery. eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[10s])[100s:25s] @ 200)[3s:1s] @ 50) - {job="1"} 464 + {job="1"} 192 # Nested subqueries with timestamp only on outer subquery. # Outer most subquery: -# at 900=783 -# inner subquery: at 870=87+86+85, at 880=88+87+86, at 890=89+88+87 -# at 925=537 -# inner subquery: at 895=89+88, at 905=90+89, at 915=90+91 -# at 950=828 -# inner subquery: at 920=92+91+90, at 930=93+92+91, at 940=94+93+92 -# at 975=567 -# inner subquery: at 945=94+93, at 955=95+94, at 965=96+95 -# at 1000=873 -# inner subquery: at 970=97+96+95, at 980=98+97+96, at 990=99+98+97 +# at 925=360 +# inner subquery: at 905=90+89, at 915=91+90 +# at 950=372 +# inner subquery: at 930=93+92, at 940=94+93 +# at 975=380 +# inner subquery: at 955=95+94, at 965=96+95 +# at 1000=392 +# inner subquery: at 980=98+97, at 990=99+98 eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[20s])[20s:10s] offset 10s)[100s:25s] @ 1000) - {job="1"} 3588 + {job="1"} 1504 # minute is counted on the value of the sample. eval instant at 10s minute(metric @ 1500) @@ -182,32 +180,32 @@ eval instant at 15m timestamp(timestamp(metric{job="1"} @ 10)) # minute is counted on the value of the sample. eval instant at 0s sum_over_time(minute(metric @ 1500)[100s:10s]) - {job="1"} 22 - {job="2"} 55 + {job="1"} 20 + {job="2"} 50 # If nothing passed, minute() takes eval time. # Here the eval time is determined by the subquery. # [50m:1m] at 6000, i.e. 100m, is 50m to 100m. -# sum=50+51+52+...+59+0+1+2+...+40. +# sum=51+52+...+59+0+1+2+...+40. eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000) - {} 1365 + {} 1315 -# sum=45+46+47+...+59+0+1+2+...+35. +# sum=46+47+...+59+0+1+2+...+35. eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000 offset 5m) - {} 1410 + {} 1365 # time() is the eval time which is determined by subquery here. -# 2900+2901+...+3000 = (3000*3001 - 2899*2900)/2. +# 2901+...+3000 = (3000*3001 - 2899*2900)/2. 
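The time()-based subquery expectations below follow from the closed form for a run of consecutive integers, (a+1) + (a+2) + ... + b = (b(b+1) - a(a+1))/2, now taken without the sample on the lower boundary. Note that the closed-form expressions kept in the comments still describe the old, boundary-inclusive sums; the new values correspond to the open-boundary sums checked here:

package main

import "fmt"

// sumRange returns (a+1) + (a+2) + ... + b via (b(b+1) - a(a+1)) / 2.
func sumRange(a, b int) int {
	return (b*(b+1) - a*(a+1)) / 2
}

func main() {
	fmt.Println(sumRange(2900, 3000)) // 295050 = 2901+...+3000
	fmt.Println(sumRange(2300, 2400)) // 235050 = 2301+...+2400
}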
eval instant at 0s sum_over_time(vector(time())[100s:1s] @ 3000) - {} 297950 + {} 295050 -# 2300+2301+...+2400 = (2400*2401 - 2299*2300)/2. +# 2301+...+2400 = (2400*2401 - 2299*2300)/2. eval instant at 0s sum_over_time(vector(time())[100s:1s] @ 3000 offset 600s) - {} 237350 + {} 235050 # timestamp() takes the time of the sample and not the evaluation time. eval instant at 0s sum_over_time(timestamp(metric{job="1"} @ 10)[100s:10s] @ 3000) - {job="1"} 110 + {job="1"} 100 # The result of inner timestamp() will have the timestamp as the # eval time, hence entire expression is not step invariant and depends on eval time. diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index 6e2b3630bcb..fb1d1696244 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -6,11 +6,13 @@ load 5m # Tests for resets(). eval instant at 50m resets(http_requests[5m]) + +eval instant at 50m resets(http_requests[10m]) {path="/foo"} 0 {path="/bar"} 0 {path="/biz"} 0 -eval instant at 50m resets(http_requests[300]) +eval instant at 50m resets(http_requests[600]) {path="/foo"} 0 {path="/bar"} 0 {path="/biz"} 0 @@ -21,6 +23,11 @@ eval instant at 50m resets(http_requests[20m]) {path="/biz"} 0 eval instant at 50m resets(http_requests[30m]) + {path="/foo"} 1 + {path="/bar"} 0 + {path="/biz"} 0 + +eval instant at 50m resets(http_requests[32m]) {path="/foo"} 2 {path="/bar"} 1 {path="/biz"} 0 @@ -34,28 +41,30 @@ eval instant at 50m resets(nonexistent_metric[50m]) # Tests for changes(). eval instant at 50m changes(http_requests[5m]) + +eval instant at 50m changes(http_requests[6m]) {path="/foo"} 0 {path="/bar"} 0 {path="/biz"} 0 eval instant at 50m changes(http_requests[20m]) - {path="/foo"} 3 - {path="/bar"} 3 + {path="/foo"} 2 + {path="/bar"} 2 {path="/biz"} 0 eval instant at 50m changes(http_requests[30m]) - {path="/foo"} 4 - {path="/bar"} 5 - {path="/biz"} 1 + {path="/foo"} 3 + {path="/bar"} 4 + {path="/biz"} 0 eval instant at 50m changes(http_requests[50m]) - {path="/foo"} 8 - {path="/bar"} 9 + {path="/foo"} 7 + {path="/bar"} 8 {path="/biz"} 1 eval instant at 50m changes((http_requests[50m])) - {path="/foo"} 8 - {path="/bar"} 9 + {path="/foo"} 7 + {path="/bar"} 8 {path="/biz"} 1 eval instant at 50m changes(nonexistent_metric[50m]) @@ -66,7 +75,7 @@ load 5m x{a="b"} NaN NaN NaN x{a="c"} 0 NaN 0 -eval instant at 15m changes(x[15m]) +eval instant at 15m changes(x[20m]) {a="b"} 0 {a="c"} 2 @@ -75,14 +84,14 @@ clear # Tests for increase(). load 5m http_requests{path="/foo"} 0+10x10 - http_requests{path="/bar"} 0+10x5 0+10x5 + http_requests{path="/bar"} 0+18x5 0+18x5 http_requests{path="/dings"} 10+10x10 http_requests{path="/bumms"} 1+10x10 # Tests for increase(). eval instant at 50m increase(http_requests[50m]) {path="/foo"} 100 - {path="/bar"} 90 + {path="/bar"} 160 {path="/dings"} 100 {path="/bumms"} 100 @@ -95,7 +104,7 @@ eval instant at 50m increase(http_requests[50m]) # value, and therefore the extrapolation happens only by 30s. eval instant at 50m increase(http_requests[100m]) {path="/foo"} 100 - {path="/bar"} 90 + {path="/bar"} 162 {path="/dings"} 105 {path="/bumms"} 101 @@ -115,15 +124,17 @@ clear # Tests for rate(). load 5m - testcounter_reset_middle 0+10x4 0+10x5 + testcounter_reset_middle 0+27x4 0+27x5 testcounter_reset_end 0+10x9 0 10 # Counter resets at in the middle of range are handled correctly by rate(). 
eval instant at 50m rate(testcounter_reset_middle[50m]) - {} 0.03 + {} 0.08 # Counter resets at end of range are ignored by rate(). eval instant at 50m rate(testcounter_reset_end[5m]) + +eval instant at 50m rate(testcounter_reset_end[6m]) {} 0 clear @@ -242,24 +253,24 @@ eval instant at 50m deriv(testcounter_reset_middle[100m]) # intercept at t=3000: 38.63636363636364 # intercept at t=3000+3600: 76.81818181818181 eval instant at 50m predict_linear(testcounter_reset_middle[50m], 3600) - {} 76.81818181818181 + {} 70 eval instant at 50m predict_linear(testcounter_reset_middle[50m], 1h) - {} 76.81818181818181 + {} 70 # intercept at t = 3000+3600 = 6600 -eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600) +eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600) {} 76.81818181818181 -eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 1h) +eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 1h) {} 76.81818181818181 # intercept at t = 600+3600 = 4200 -eval instant at 10m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600) +eval instant at 10m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600) {} 51.36363636363637 # intercept at t = 4200+3600 = 7800 -eval instant at 70m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600) +eval instant at 70m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600) {} 89.54545454545455 # With http_requests, there is a sample value exactly at the end of @@ -467,7 +478,7 @@ load 5m http_requests{job="api-server", instance="1", group="production"} 0+20x10 http_requests{job="api-server", instance="0", group="canary"} 0+30x10 http_requests{job="api-server", instance="1", group="canary"} 0+40x10 - http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN + http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN http_requests{job="app-server", instance="0", group="production"} 0+50x10 http_requests{job="app-server", instance="1", group="production"} 0+60x10 http_requests{job="app-server", instance="0", group="canary"} 0+70x10 @@ -502,7 +513,7 @@ load 5m http_requests{job="api-server", instance="1", group="production"} 0+20x10 http_requests{job="api-server", instance="0", group="canary"} 0+30x10 http_requests{job="api-server", instance="1", group="canary"} 0+40x10 - http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN + http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN http_requests{job="app-server", instance="0", group="production"} 0+50x10 http_requests{job="app-server", instance="1", group="production"} 0+60x10 http_requests{job="app-server", instance="0", group="canary"} 0+70x10 @@ -640,7 +651,7 @@ eval_ordered instant at 50m sort_by_label(node_uname_info, "release") node_uname_info{job="node_exporter", instance="4m5", release="1.11.3"} 100 node_uname_info{job="node_exporter", instance="4m1000", release="1.111.3"} 100 -# Tests for holt_winters +# Tests for double_exponential_smoothing clear # positive trends @@ -650,7 +661,7 @@ load 10s http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 http_requests{job="api-server", instance="1", group="canary"} 0+40x2000 -eval instant at 8000s holt_winters(http_requests[1m], 0.01, 0.1) +eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1) {job="api-server", 
instance="0", group="production"} 8000 {job="api-server", instance="1", group="production"} 16000 {job="api-server", instance="0", group="canary"} 24000 @@ -664,7 +675,7 @@ load 10s http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300-80x1000 http_requests{job="api-server", instance="1", group="canary"} 0-40x1000 0+40x1000 -eval instant at 8000s holt_winters(http_requests[1m], 0.01, 0.1) +eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1) {job="api-server", instance="0", group="production"} 0 {job="api-server", instance="1", group="production"} -16000 {job="api-server", instance="0", group="canary"} 24000 @@ -688,10 +699,10 @@ load 10s metric9 -9.988465674311579e+307 -9.988465674311579e+307 -9.988465674311579e+307 metric10 -9.988465674311579e+307 9.988465674311579e+307 -eval instant at 1m avg_over_time(metric[1m]) +eval instant at 55s avg_over_time(metric[1m]) {} 3 -eval instant at 1m sum_over_time(metric[1m])/count_over_time(metric[1m]) +eval instant at 55s sum_over_time(metric[1m])/count_over_time(metric[1m]) {} 3 eval instant at 1m avg_over_time(metric2[1m]) @@ -758,8 +769,8 @@ eval instant at 1m avg_over_time(metric8[1m]) {} 9.988465674311579e+307 # This overflows float64. -eval instant at 1m sum_over_time(metric8[1m])/count_over_time(metric8[1m]) - {} Inf +eval instant at 1m sum_over_time(metric8[2m])/count_over_time(metric8[2m]) + {} +Inf eval instant at 1m avg_over_time(metric9[1m]) {} -9.988465674311579e+307 @@ -768,10 +779,16 @@ eval instant at 1m avg_over_time(metric9[1m]) eval instant at 1m sum_over_time(metric9[1m])/count_over_time(metric9[1m]) {} -Inf -eval instant at 1m avg_over_time(metric10[1m]) +eval instant at 45s avg_over_time(metric10[1m]) + {} 0 + +eval instant at 1m avg_over_time(metric10[2m]) {} 0 -eval instant at 1m sum_over_time(metric10[1m])/count_over_time(metric10[1m]) +eval instant at 45s sum_over_time(metric10[1m])/count_over_time(metric10[1m]) + {} 0 + +eval instant at 1m sum_over_time(metric10[2m])/count_over_time(metric10[2m]) {} 0 # Test if very big intermediate values cause loss of detail. @@ -779,10 +796,10 @@ clear load 10s metric 1 1e100 1 -1e100 -eval instant at 1m sum_over_time(metric[1m]) +eval instant at 1m sum_over_time(metric[2m]) {} 2 -eval instant at 1m avg_over_time(metric[1m]) +eval instant at 1m avg_over_time(metric[2m]) {} 0.5 # Tests for stddev_over_time and stdvar_over_time. @@ -790,13 +807,13 @@ clear load 10s metric 0 8 8 2 3 -eval instant at 1m stdvar_over_time(metric[1m]) +eval instant at 1m stdvar_over_time(metric[2m]) {} 10.56 -eval instant at 1m stddev_over_time(metric[1m]) +eval instant at 1m stddev_over_time(metric[2m]) {} 3.249615 -eval instant at 1m stddev_over_time((metric[1m])) +eval instant at 1m stddev_over_time((metric[2m])) {} 3.249615 # Tests for stddev_over_time and stdvar_over_time #4927. 
@@ -826,42 +843,42 @@ load 10s data{test="three samples"} 0 1 2 data{test="uneven samples"} 0 1 4 -eval instant at 1m quantile_over_time(0, data[1m]) +eval instant at 1m quantile_over_time(0, data[2m]) {test="two samples"} 0 {test="three samples"} 0 {test="uneven samples"} 0 -eval instant at 1m quantile_over_time(0.5, data[1m]) +eval instant at 1m quantile_over_time(0.5, data[2m]) {test="two samples"} 0.5 {test="three samples"} 1 {test="uneven samples"} 1 -eval instant at 1m quantile_over_time(0.75, data[1m]) +eval instant at 1m quantile_over_time(0.75, data[2m]) {test="two samples"} 0.75 {test="three samples"} 1.5 {test="uneven samples"} 2.5 -eval instant at 1m quantile_over_time(0.8, data[1m]) +eval instant at 1m quantile_over_time(0.8, data[2m]) {test="two samples"} 0.8 {test="three samples"} 1.6 {test="uneven samples"} 2.8 -eval instant at 1m quantile_over_time(1, data[1m]) +eval instant at 1m quantile_over_time(1, data[2m]) {test="two samples"} 1 {test="three samples"} 2 {test="uneven samples"} 4 -eval_warn instant at 1m quantile_over_time(-1, data[1m]) +eval_warn instant at 1m quantile_over_time(-1, data[2m]) {test="two samples"} -Inf {test="three samples"} -Inf {test="uneven samples"} -Inf -eval_warn instant at 1m quantile_over_time(2, data[1m]) +eval_warn instant at 1m quantile_over_time(2, data[2m]) {test="two samples"} +Inf {test="three samples"} +Inf {test="uneven samples"} +Inf -eval_warn instant at 1m (quantile_over_time(2, (data[1m]))) +eval_warn instant at 1m (quantile_over_time(2, (data[2m]))) {test="two samples"} +Inf {test="three samples"} +Inf {test="uneven samples"} +Inf @@ -969,21 +986,21 @@ load 10s data{type="some_nan3"} NaN 0 1 data{type="only_nan"} NaN NaN NaN -eval instant at 1m min_over_time(data[1m]) +eval instant at 1m min_over_time(data[2m]) {type="numbers"} 0 {type="some_nan"} 0 {type="some_nan2"} 1 {type="some_nan3"} 0 {type="only_nan"} NaN -eval instant at 1m max_over_time(data[1m]) +eval instant at 1m max_over_time(data[2m]) {type="numbers"} 3 {type="some_nan"} 2 {type="some_nan2"} 2 {type="some_nan3"} 1 {type="only_nan"} NaN -eval instant at 1m last_over_time(data[1m]) +eval instant at 1m last_over_time(data[2m]) data{type="numbers"} 3 data{type="some_nan"} NaN data{type="some_nan2"} 1 @@ -1076,13 +1093,19 @@ eval instant at 1m absent_over_time(httpd_log_lines_total[30s]) {} 1 eval instant at 15m absent_over_time(http_requests[5m]) - -eval instant at 16m absent_over_time(http_requests[5m]) {} 1 +eval instant at 15m absent_over_time(http_requests[10m]) + eval instant at 16m absent_over_time(http_requests[6m]) + {} 1 + +eval instant at 16m absent_over_time(http_requests[16m]) eval instant at 16m absent_over_time(httpd_handshake_failures_total[1m]) + {} 1 + +eval instant at 16m absent_over_time(httpd_handshake_failures_total[2m]) eval instant at 16m absent_over_time({instance="127.0.0.1"}[5m]) @@ -1138,17 +1161,18 @@ eval instant at 0m present_over_time(httpd_log_lines_total[30s]) eval instant at 1m present_over_time(httpd_log_lines_total[30s]) eval instant at 15m present_over_time(http_requests[5m]) + +eval instant at 15m present_over_time(http_requests[10m]) {instance="127.0.0.1", job="httpd", path="/bar"} 1 {instance="127.0.0.1", job="httpd", path="/foo"} 1 -eval instant at 16m present_over_time(http_requests[5m]) - eval instant at 16m present_over_time(http_requests[6m]) + +eval instant at 16m present_over_time(http_requests[16m]) {instance="127.0.0.1", job="httpd", path="/bar"} 1 {instance="127.0.0.1", job="httpd", path="/foo"} 1 eval instant at 16m 
present_over_time(httpd_handshake_failures_total[1m]) - {instance="127.0.0.1", job="node"} 1 eval instant at 16m present_over_time({instance="127.0.0.1"}[5m]) {instance="127.0.0.1",job="node"} 1 @@ -1169,59 +1193,59 @@ load 5m exp_root_log{l="x"} 10 exp_root_log{l="y"} 20 -eval instant at 5m exp(exp_root_log) +eval instant at 1m exp(exp_root_log) {l="x"} 22026.465794806718 {l="y"} 485165195.4097903 -eval instant at 5m exp(exp_root_log - 10) +eval instant at 1m exp(exp_root_log - 10) {l="y"} 22026.465794806718 {l="x"} 1 -eval instant at 5m exp(exp_root_log - 20) +eval instant at 1m exp(exp_root_log - 20) {l="x"} 4.5399929762484854e-05 {l="y"} 1 -eval instant at 5m ln(exp_root_log) +eval instant at 1m ln(exp_root_log) {l="x"} 2.302585092994046 {l="y"} 2.995732273553991 -eval instant at 5m ln(exp_root_log - 10) +eval instant at 1m ln(exp_root_log - 10) {l="y"} 2.302585092994046 {l="x"} -Inf -eval instant at 5m ln(exp_root_log - 20) +eval instant at 1m ln(exp_root_log - 20) {l="y"} -Inf {l="x"} NaN -eval instant at 5m exp(ln(exp_root_log)) +eval instant at 1m exp(ln(exp_root_log)) {l="y"} 20 {l="x"} 10 -eval instant at 5m sqrt(exp_root_log) +eval instant at 1m sqrt(exp_root_log) {l="x"} 3.1622776601683795 {l="y"} 4.47213595499958 -eval instant at 5m log2(exp_root_log) +eval instant at 1m log2(exp_root_log) {l="x"} 3.3219280948873626 {l="y"} 4.321928094887363 -eval instant at 5m log2(exp_root_log - 10) +eval instant at 1m log2(exp_root_log - 10) {l="y"} 3.3219280948873626 {l="x"} -Inf -eval instant at 5m log2(exp_root_log - 20) +eval instant at 1m log2(exp_root_log - 20) {l="x"} NaN {l="y"} -Inf -eval instant at 5m log10(exp_root_log) +eval instant at 1m log10(exp_root_log) {l="x"} 1 {l="y"} 1.301029995663981 -eval instant at 5m log10(exp_root_log - 10) +eval instant at 1m log10(exp_root_log - 10) {l="y"} 1 {l="x"} -Inf -eval instant at 5m log10(exp_root_log - 20) +eval instant at 1m log10(exp_root_log - 20) {l="x"} NaN {l="y"} -Inf @@ -1234,3 +1258,12 @@ load 1m # We expect the value to be 0 for t=0s to t=59s (inclusive), then 60 for t=60s and t=61s. 
eval range from 0 to 61s step 1s timestamp(metric) {} 0x59 60 60 + +clear + +# Check round with mixed data types +load 1m + mixed_metric {{schema:0 sum:5 count:4 buckets:[1 2 1]}} 1 2 3 {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:0 sum:8 count:6 buckets:[1 4 1]}} + +eval range from 0 to 5m step 1m round(mixed_metric) + {} _ 1 2 3 diff --git a/promql/promqltest/testdata/histograms.test b/promql/promqltest/testdata/histograms.test index 47cba799352..6089fd01d20 100644 --- a/promql/promqltest/testdata/histograms.test +++ b/promql/promqltest/testdata/histograms.test @@ -108,8 +108,8 @@ eval instant at 50m histogram_stdvar(testhistogram3) eval instant at 50m histogram_fraction(0, 0.2, testhistogram3) {start="positive"} 0.6363636363636364 {start="negative"} 0 - -eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[5m])) + +eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[10m])) {start="positive"} 0.6363636363636364 {start="negative"} 0 @@ -118,8 +118,8 @@ eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[5m])) eval instant at 50m testhistogram3_bucket{le=".2"} / ignoring(le) testhistogram3_count {start="positive"} 0.6363636363636364 - -eval instant at 50m rate(testhistogram3_bucket{le=".2"}[5m]) / ignoring(le) rate(testhistogram3_count[5m]) + +eval instant at 50m rate(testhistogram3_bucket{le=".2"}[10m]) / ignoring(le) rate(testhistogram3_count[10m]) {start="positive"} 0.6363636363636364 # Test histogram_quantile, native and classic. @@ -241,28 +241,27 @@ eval instant at 50m histogram_quantile(0.8, testhistogram_bucket) {start="negative"} 0.3 # More realistic with rates. - -eval instant at 50m histogram_quantile(0.2, rate(testhistogram[5m])) +eval instant at 50m histogram_quantile(0.2, rate(testhistogram[10m])) {start="positive"} 0.048 {start="negative"} -0.2 -eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[5m])) +eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[10m])) {start="positive"} 0.048 {start="negative"} -0.2 -eval instant at 50m histogram_quantile(0.5, rate(testhistogram[5m])) +eval instant at 50m histogram_quantile(0.5, rate(testhistogram[10m])) {start="positive"} 0.15 {start="negative"} -0.15 -eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[5m])) +eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[10m])) {start="positive"} 0.15 {start="negative"} -0.15 -eval instant at 50m histogram_quantile(0.8, rate(testhistogram[5m])) +eval instant at 50m histogram_quantile(0.8, rate(testhistogram[10m])) {start="positive"} 0.72 {start="negative"} 0.3 -eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[5m])) +eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[10m])) {start="positive"} 0.72 {start="negative"} 0.3 @@ -307,115 +306,112 @@ eval instant at 47m histogram_quantile(5./6., rate(testhistogram2_bucket[15m])) # Aggregated histogram: Everything in one. Note how native histograms # don't require aggregation by le. 
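Native histograms aggregate by adding whole histograms component-wise (count, sum and the per-bucket populations of compatible layouts), so no le grouping label is involved; only the classic representation needs sum by (le) to keep its buckets apart. A toy component-wise addition, assuming two histograms that already share schema and bucket layout (toyHist is an illustrative type, not Prometheus' histogram):

package main

import "fmt"

// toyHist is a simplified stand-in for a native histogram with a fixed,
// shared bucket layout.
type toyHist struct {
	Count, Sum float64
	Buckets    []float64
}

// add sums two histograms component-wise.
func add(a, b toyHist) toyHist {
	out := toyHist{Count: a.Count + b.Count, Sum: a.Sum + b.Sum, Buckets: make([]float64, len(a.Buckets))}
	for i := range a.Buckets {
		out.Buckets[i] = a.Buckets[i] + b.Buckets[i]
	}
	return out
}

func main() {
	h1 := toyHist{Count: 4, Sum: 5, Buckets: []float64{1, 2, 1}}
	h2 := toyHist{Count: 6, Sum: 8, Buckets: []float64{1, 4, 1}}
	fmt.Printf("%+v\n", add(h1, h2)) // {Count:10 Sum:13 Buckets:[2 6 2]}
}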
-eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m]))) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m]))) {} 0.075 -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le)) {} 0.075 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m]))) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m]))) {} 0.1277777777777778 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le)) {} 0.1277777777777778 # Aggregated histogram: Everything in one. Now with avg, which does not change anything. -eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds[5m]))) +eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds[10m]))) {} 0.075 -eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[5m])) by (le)) +eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[10m])) by (le)) {} 0.075 -eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds[5m]))) +eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds[10m]))) {} 0.12777777777777778 -eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[5m])) by (le)) +eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[10m])) by (le)) {} 0.12777777777777778 # Aggregated histogram: By instance. -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (instance)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (instance)) {instance="ins1"} 0.075 {instance="ins2"} 0.075 -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance)) {instance="ins1"} 0.075 {instance="ins2"} 0.075 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (instance)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (instance)) {instance="ins1"} 0.1333333333 {instance="ins2"} 0.125 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance)) {instance="ins1"} 0.1333333333 {instance="ins2"} 0.125 # Aggregated histogram: By job. 
- -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (job)) {job="job1"} 0.1 {job="job2"} 0.0642857142857143 -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job)) {job="job1"} 0.1 {job="job2"} 0.0642857142857143 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (job)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (job)) {job="job1"} 0.14 {job="job2"} 0.1125 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job)) {job="job1"} 0.14 {job="job2"} 0.1125 # Aggregated histogram: By job and instance. - -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job, instance)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (job, instance)) {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance)) {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (job, instance)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (job, instance)) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.1333333333333333 {instance="ins1", job="job2"} 0.1 {instance="ins2", job="job2"} 0.1166666666666667 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance)) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.1333333333333333 {instance="ins1", job="job2"} 0.1 {instance="ins2", job="job2"} 0.1166666666666667 # The unaggregated histogram for comparison. Same result as the previous one. 
- -eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds[5m])) +eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds[10m])) {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 -eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[5m])) +eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[10m])) {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 -eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds[5m])) +eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds[10m])) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.13333333333333333 {instance="ins1", job="job2"} 0.1 {instance="ins2", job="job2"} 0.11666666666666667 -eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[5m])) +eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[10m])) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.13333333333333333 {instance="ins1", job="job2"} 0.1 @@ -425,6 +421,25 @@ eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket eval instant at 50m sum(request_duration_seconds) {} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}} +eval instant at 50m sum(request_duration_seconds{job="job1",instance="ins1"} + ignoring(job,instance) request_duration_seconds{job="job1",instance="ins2"} + ignoring(job,instance) request_duration_seconds{job="job2",instance="ins1"} + ignoring(job,instance) request_duration_seconds{job="job2",instance="ins2"}) + {} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}} + +eval instant at 50m avg(request_duration_seconds) + {} {{schema:-53 count:62.5 custom_values:[0.1 0.2] buckets:[25 22.5 15]}} + +# To verify the result above, calculate from classic histogram as well. +eval instant at 50m avg (request_duration_seconds_bucket{le="0.1"}) + {} 25 + +eval instant at 50m avg (request_duration_seconds_bucket{le="0.2"}) - avg (request_duration_seconds_bucket{le="0.1"}) + {} 22.5 + +eval instant at 50m avg (request_duration_seconds_bucket{le="+Inf"}) - avg (request_duration_seconds_bucket{le="0.2"}) + {} 15 + +eval instant at 50m count(request_duration_seconds) + {} 4 + # A histogram with nonmonotonic bucket counts. This may happen when recording # rule evaluation or federation races scrape ingestion, causing some buckets # counts to be derived from fewer samples. @@ -448,19 +463,19 @@ eval instant at 50m histogram_quantile(0.99, nonmonotonic_bucket) {} 979.75 # Buckets with different representations of the same upper bound. 
-eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[5m])) +eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[10m])) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} NaN -eval instant at 50m histogram_quantile(0.5, rate(mixed[5m])) +eval instant at 50m histogram_quantile(0.5, rate(mixed[10m])) {instance="ins1", job="job1"} 0.2 {instance="ins2", job="job1"} NaN -eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[5m])) +eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[10m])) {instance="ins1", job="job1"} 0.2 {instance="ins2", job="job1"} NaN -eval instant at 50m histogram_quantile(1, rate(mixed_bucket[5m])) +eval instant at 50m histogram_quantile(1, rate(mixed_bucket[10m])) {instance="ins1", job="job1"} 0.2 {instance="ins2", job="job1"} NaN @@ -469,7 +484,7 @@ load_with_nhcb 5m empty_bucket{le="0.2", job="job1", instance="ins1"} 0x10 empty_bucket{le="+Inf", job="job1", instance="ins1"} 0x10 -eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[5m])) +eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[10m])) {instance="ins1", job="job1"} NaN # Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set. @@ -508,3 +523,36 @@ eval instant at 5m histogram_quantile(1.0, sum by (le) (rate(const_histogram_buc eval instant at 5m histogram_quantile(1.0, sum(rate(const_histogram[5m]))) {} NaN + +load_with_nhcb 1m + histogram_over_time_bucket{le="0"} 0 1 3 9 + histogram_over_time_bucket{le="1"} 2 3 3 9 + histogram_over_time_bucket{le="2"} 3 8 5 10 + histogram_over_time_bucket{le="4"} 3 10 6 18 + +# Test custom buckets with sum_over_time, avg_over_time. +eval instant at 3m sum_over_time(histogram_over_time[4m:1m]) + {} {{schema:-53 count:37 custom_values:[0 1 2 4] buckets:[13 4 9 11]}} + +eval instant at 3m avg_over_time(histogram_over_time[4m:1m]) + {} {{schema:-53 count:9.25 custom_values:[0 1 2 4] buckets:[3.25 1 2.25 2.75]}} + +# Test custom buckets with counter reset +load_with_nhcb 5m + histogram_with_reset_bucket{le="1"} 1 3 9 + histogram_with_reset_bucket{le="2"} 3 3 9 + histogram_with_reset_bucket{le="4"} 8 5 12 + histogram_with_reset_bucket{le="8"} 10 6 18 + histogram_with_reset_sum{} 36 16 61 + +eval instant at 10m increase(histogram_with_reset[15m]) + {} {{schema:-53 count:27 sum:91.5 custom_values:[1 2 4 8] counter_reset_hint:gauge buckets:[13.5 0 4.5 9]}} + +eval instant at 10m resets(histogram_with_reset[15m]) + {} 1 + +eval instant at 10m histogram_count(increase(histogram_with_reset[15m])) + {} 27 + +eval instant at 10m histogram_sum(increase(histogram_with_reset[15m])) + {} 91.5 diff --git a/promql/promqltest/testdata/name_label_dropping.test b/promql/promqltest/testdata/name_label_dropping.test index c8c0eb285f8..fb25766603d 100644 --- a/promql/promqltest/testdata/name_label_dropping.test +++ b/promql/promqltest/testdata/name_label_dropping.test @@ -4,31 +4,31 @@ load 5m another_metric{env="1"} 60 120 180 # Does not drop __name__ for vector selector -eval instant at 15m metric{env="1"} +eval instant at 10m metric{env="1"} metric{env="1"} 120 # Drops __name__ for unary operators -eval instant at 15m -metric +eval instant at 10m -metric {env="1"} -120 # Drops __name__ for binary operators -eval instant at 15m metric + another_metric +eval instant at 10m metric + another_metric {env="1"} 300 # Does not drop __name__ for binary comparison operators -eval instant at 15m metric <= another_metric +eval instant at 10m metric <= another_metric 
metric{env="1"} 120 # Drops __name__ for binary comparison operators with "bool" modifier -eval instant at 15m metric <= bool another_metric +eval instant at 10m metric <= bool another_metric {env="1"} 1 # Drops __name__ for vector-scalar operations -eval instant at 15m metric * 2 +eval instant at 10m metric * 2 {env="1"} 240 # Drops __name__ for instant-vector functions -eval instant at 15m clamp(metric, 0, 100) +eval instant at 10m clamp(metric, 0, 100) {env="1"} 100 # Drops __name__ for round function @@ -36,53 +36,53 @@ eval instant at 15m round(metric) {env="1"} 120 # Drops __name__ for range-vector functions -eval instant at 15m rate(metric{env="1"}[10m]) +eval instant at 10m rate(metric{env="1"}[10m]) {env="1"} 0.2 # Does not drop __name__ for last_over_time function -eval instant at 15m last_over_time(metric{env="1"}[10m]) +eval instant at 10m last_over_time(metric{env="1"}[10m]) metric{env="1"} 120 # Drops name for other _over_time functions -eval instant at 15m max_over_time(metric{env="1"}[10m]) +eval instant at 10m max_over_time(metric{env="1"}[10m]) {env="1"} 120 # Allows relabeling (to-be-dropped) __name__ via label_replace -eval instant at 15m label_replace(rate({env="1"}[10m]), "my_name", "rate_$1", "__name__", "(.+)") +eval instant at 10m label_replace(rate({env="1"}[10m]), "my_name", "rate_$1", "__name__", "(.+)") {my_name="rate_metric", env="1"} 0.2 {my_name="rate_another_metric", env="1"} 0.2 # Allows preserving __name__ via label_replace -eval instant at 15m label_replace(rate({env="1"}[10m]), "__name__", "rate_$1", "__name__", "(.+)") +eval instant at 10m label_replace(rate({env="1"}[10m]), "__name__", "rate_$1", "__name__", "(.+)") rate_metric{env="1"} 0.2 rate_another_metric{env="1"} 0.2 # Allows relabeling (to-be-dropped) __name__ via label_join -eval instant at 15m label_join(rate({env="1"}[10m]), "my_name", "_", "__name__") +eval instant at 10m label_join(rate({env="1"}[10m]), "my_name", "_", "__name__") {my_name="metric", env="1"} 0.2 {my_name="another_metric", env="1"} 0.2 # Allows preserving __name__ via label_join -eval instant at 15m label_join(rate({env="1"}[10m]), "__name__", "_", "__name__", "env") +eval instant at 10m label_join(rate({env="1"}[10m]), "__name__", "_", "__name__", "env") metric_1{env="1"} 0.2 another_metric_1{env="1"} 0.2 # Does not drop metric names fro aggregation operators -eval instant at 15m sum by (__name__, env) (metric{env="1"}) +eval instant at 10m sum by (__name__, env) (metric{env="1"}) metric{env="1"} 120 # Aggregation operators by __name__ lead to duplicate labelset errors (aggregation is partitioned by not yet removed __name__ label) # This is an accidental side effect of delayed __name__ label dropping -eval_fail instant at 15m sum by (__name__) (rate({env="1"}[10m])) +eval_fail instant at 10m sum by (__name__) (rate({env="1"}[10m])) # Aggregation operators aggregate metrics with same labelset and to-be-dropped names # This is an accidental side effect of delayed __name__ label dropping -eval instant at 15m sum(rate({env="1"}[10m])) by (env) +eval instant at 10m sum(rate({env="1"}[10m])) by (env) {env="1"} 0.4 # Aggregationk operators propagate __name__ label dropping information -eval instant at 15m topk(10, sum by (__name__, env) (metric{env="1"})) +eval instant at 10m topk(10, sum by (__name__, env) (metric{env="1"})) metric{env="1"} 120 -eval instant at 15m topk(10, sum by (__name__, env) (rate(metric{env="1"}[10m]))) +eval instant at 10m topk(10, sum by (__name__, env) (rate(metric{env="1"}[10m]))) {env="1"} 0.2 diff 
--git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 7d2eec32cfa..8c5814ae8a0 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -2,55 +2,58 @@ load 5m empty_histogram {{}} -eval instant at 5m empty_histogram +eval instant at 1m empty_histogram {__name__="empty_histogram"} {{}} -eval instant at 5m histogram_count(empty_histogram) +eval instant at 1m histogram_count(empty_histogram) {} 0 -eval instant at 5m histogram_sum(empty_histogram) +eval instant at 1m histogram_sum(empty_histogram) {} 0 -eval instant at 5m histogram_avg(empty_histogram) +eval instant at 1m histogram_avg(empty_histogram) {} NaN -eval instant at 5m histogram_fraction(-Inf, +Inf, empty_histogram) +eval instant at 1m histogram_fraction(-Inf, +Inf, empty_histogram) {} NaN -eval instant at 5m histogram_fraction(0, 8, empty_histogram) +eval instant at 1m histogram_fraction(0, 8, empty_histogram) {} NaN - +clear # buckets:[1 2 1] means 1 observation in the 1st bucket, 2 observations in the 2nd and 1 observation in the 3rd (total 4). load 5m single_histogram {{schema:0 sum:5 count:4 buckets:[1 2 1]}} # histogram_count extracts the count property from the histogram. -eval instant at 5m histogram_count(single_histogram) +eval instant at 1m histogram_count(single_histogram) {} 4 # histogram_sum extracts the sum property from the histogram. -eval instant at 5m histogram_sum(single_histogram) +eval instant at 1m histogram_sum(single_histogram) {} 5 # histogram_avg calculates the average from sum and count properties. -eval instant at 5m histogram_avg(single_histogram) +eval instant at 1m histogram_avg(single_histogram) {} 1.25 # We expect half of the values to fall in the range 1 < x <= 2. -eval instant at 5m histogram_fraction(1, 2, single_histogram) +eval instant at 1m histogram_fraction(1, 2, single_histogram) {} 0.5 # We expect all values to fall in the range 0 < x <= 8. -eval instant at 5m histogram_fraction(0, 8, single_histogram) +eval instant at 1m histogram_fraction(0, 8, single_histogram) {} 1 -# Median is 1.5 due to linear estimation of the midpoint of the middle bucket, whose values are within range 1 < x <= 2. -eval instant at 5m histogram_quantile(0.5, single_histogram) - {} 1.5 - +# Median is 1.414213562373095 (2**2**-1, or sqrt(2)) due to +# exponential interpolation, i.e. the "midpoint" within range 1 < x <= +# 2 is assumed where the bucket boundary would be if we increased the +# resolution of the histogram by one step. +eval instant at 1m histogram_quantile(0.5, single_histogram) + {} 1.414213562373095 +clear # Repeat the same histogram 10 times. load 5m @@ -68,8 +71,9 @@ eval instant at 5m histogram_avg(multi_histogram) eval instant at 5m histogram_fraction(1, 2, multi_histogram) {} 0.5 +# See explanation for exponential interpolation above. eval instant at 5m histogram_quantile(0.5, multi_histogram) - {} 1.5 + {} 1.414213562373095 # Each entry should look the same as the first. @@ -85,10 +89,11 @@ eval instant at 50m histogram_avg(multi_histogram) eval instant at 50m histogram_fraction(1, 2, multi_histogram) {} 0.5 +# See explanation for exponential interpolation above. eval instant at 50m histogram_quantile(0.5, multi_histogram) - {} 1.5 - + {} 1.414213562373095 +clear # Accumulate the histogram addition for 10 iterations, offset is a bucket position where offset:0 is always the bucket # with an upper limit of 1 and offset:1 is the bucket which follows to the right. 
Negative offsets represent bucket @@ -109,8 +114,9 @@ eval instant at 5m histogram_avg(incr_histogram) eval instant at 5m histogram_fraction(1, 2, incr_histogram) {} 0.6 +# See explanation for exponential interpolation above. eval instant at 5m histogram_quantile(0.5, incr_histogram) - {} 1.5 + {} 1.414213562373095 eval instant at 50m incr_histogram @@ -129,18 +135,20 @@ eval instant at 50m histogram_avg(incr_histogram) eval instant at 50m histogram_fraction(1, 2, incr_histogram) {} 0.8571428571428571 +# See explanation for exponential interpolation above. eval instant at 50m histogram_quantile(0.5, incr_histogram) - {} 1.5 + {} 1.414213562373095 # Per-second average rate of increase should be 1/(5*60) for count and buckets, then 2/(5*60) for sum. -eval instant at 50m rate(incr_histogram[5m]) - {} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}} +eval instant at 50m rate(incr_histogram[10m]) + {} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}} # Calculate the 50th percentile of observations over the last 10m. +# See explanation for exponential interpolation above. eval instant at 50m histogram_quantile(0.5, rate(incr_histogram[10m])) - {} 1.5 - + {} 1.414213562373095 +clear # Schema represents the histogram resolution, different schema have compatible bucket boundaries, e.g.: # 0: 1 2 4 8 16 32 64 (higher resolution) @@ -166,77 +174,79 @@ eval instant at 5m histogram_avg(low_res_histogram) eval instant at 5m histogram_fraction(1, 4, low_res_histogram) {} 1 - +clear # z_bucket:1 means there is one observation in the zero bucket and z_bucket_w:0.5 means the zero bucket has the range # 0 < x <= 0.5. Sum and count are expected to represent all observations in the histogram, including those in the zero bucket. load 5m single_zero_histogram {{schema:0 z_bucket:1 z_bucket_w:0.5 sum:0.25 count:1}} -eval instant at 5m histogram_count(single_zero_histogram) +eval instant at 1m histogram_count(single_zero_histogram) {} 1 -eval instant at 5m histogram_sum(single_zero_histogram) +eval instant at 1m histogram_sum(single_zero_histogram) {} 0.25 -eval instant at 5m histogram_avg(single_zero_histogram) +eval instant at 1m histogram_avg(single_zero_histogram) {} 0.25 # When only the zero bucket is populated, or there are negative buckets, the distribution is assumed to be equally # distributed around zero; i.e. that there are an equal number of positive and negative observations. Therefore the # entire distribution must lie within the full range of the zero bucket, in this case: -0.5 < x <= +0.5. -eval instant at 5m histogram_fraction(-0.5, 0.5, single_zero_histogram) +eval instant at 1m histogram_fraction(-0.5, 0.5, single_zero_histogram) {} 1 # Half of the observations are estimated to be zero, as this is the midpoint between -0.5 and +0.5. -eval instant at 5m histogram_quantile(0.5, single_zero_histogram) +eval instant at 1m histogram_quantile(0.5, single_zero_histogram) {} 0 - +clear # Let's turn single_histogram upside-down. load 5m negative_histogram {{schema:0 sum:-5 count:4 n_buckets:[1 2 1]}} -eval instant at 5m histogram_count(negative_histogram) +eval instant at 1m histogram_count(negative_histogram) {} 4 -eval instant at 5m histogram_sum(negative_histogram) +eval instant at 1m histogram_sum(negative_histogram) {} -5 -eval instant at 5m histogram_avg(negative_histogram) +eval instant at 1m histogram_avg(negative_histogram) {} -1.25 # We expect half of the values to fall in the range -2 < x <= -1. 
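The new median values in this file follow from exponential (geometric) interpolation inside a bucket: instead of lower + frac*(upper-lower), the estimate is lower * (upper/lower)^frac, so the midpoint of a (1, 2] bucket becomes 2^0.5 ≈ 1.4142, mirrored to -1.4142 for the negative case below, and the varying-resolution cases added further down follow the same rule. A small sketch of that formula (not the engine's exact code path):

package main

import (
	"fmt"
	"math"
)

// expInterpolate estimates a quantile inside an exponential bucket
// (lower, upper] at relative rank frac in [0, 1].
func expInterpolate(lower, upper, frac float64) float64 {
	return lower * math.Pow(upper/lower, frac)
}

func main() {
	fmt.Println(expInterpolate(1, 2, 0.5)) // 1.4142135623730951, the new median for a (1, 2] bucket
	fmt.Println(expInterpolate(2, 4, 0.5)) // 2.8284271247461903, one bucket further to the right
	fmt.Println(expInterpolate(1, 4, 0.5)) // 2, the coarser schema:-1 bucket (1, 4]
}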
-eval instant at 5m histogram_fraction(-2, -1, negative_histogram) +eval instant at 1m histogram_fraction(-2, -1, negative_histogram) {} 0.5 -eval instant at 5m histogram_quantile(0.5, negative_histogram) - {} -1.5 - +# Exponential interpolation works the same as for positive buckets, just mirrored. +eval instant at 1m histogram_quantile(0.5, negative_histogram) + {} -1.414213562373095 +clear # Two histogram samples. load 5m two_samples_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}} {{schema:0 sum:-4 count:4 n_buckets:[1 2 1]}} # We expect to see the newest sample. -eval instant at 10m histogram_count(two_samples_histogram) +eval instant at 5m histogram_count(two_samples_histogram) {} 4 -eval instant at 10m histogram_sum(two_samples_histogram) +eval instant at 5m histogram_sum(two_samples_histogram) {} -4 -eval instant at 10m histogram_avg(two_samples_histogram) +eval instant at 5m histogram_avg(two_samples_histogram) {} -1 -eval instant at 10m histogram_fraction(-2, -1, two_samples_histogram) +eval instant at 5m histogram_fraction(-2, -1, two_samples_histogram) {} 0.5 -eval instant at 10m histogram_quantile(0.5, two_samples_histogram) - {} -1.5 - +# See explanation for exponential interpolation above. +eval instant at 5m histogram_quantile(0.5, two_samples_histogram) + {} -1.414213562373095 +clear # Add two histograms with negated data. load 5m @@ -259,6 +269,8 @@ eval instant at 5m histogram_fraction(0, 4, balanced_histogram) eval instant at 5m histogram_quantile(0.5, balanced_histogram) {} 0.5 +clear + # Add histogram to test sum(last_over_time) regression load 5m incr_sum_histogram{number="1"} {{schema:0 sum:0 count:0 buckets:[1]}}+{{schema:0 sum:1 count:1 buckets:[1]}}x10 @@ -270,6 +282,8 @@ eval instant at 50m histogram_sum(sum(incr_sum_histogram)) eval instant at 50m histogram_sum(sum(last_over_time(incr_sum_histogram[5m]))) {} 30 +clear + # Apply rate function to histogram. load 15s histogram_rate {{schema:1 count:12 sum:18.4 z_bucket:2 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[1 2 0 1 1]}}+{{schema:1 count:9 sum:18.4 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 0 1 1] n_buckets:[1 1 0 1 1]}}x100 @@ -280,6 +294,8 @@ eval instant at 5m rate(histogram_rate[45s]) eval range from 5m to 5m30s step 30s rate(histogram_rate[45s]) {} {{schema:1 count:0.6 sum:1.2266666666666652 z_bucket:0.06666666666666667 z_bucket_w:0.001 buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667] n_buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667]}}x1 +clear + # Apply count and sum function to histogram. load 10m histogram_count_sum_2 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 @@ -290,6 +306,8 @@ eval instant at 10m histogram_count(histogram_count_sum_2) eval instant at 10m histogram_sum(histogram_count_sum_2) {} 100 +clear + # Apply stddev and stdvar function to histogram with {1, 2, 3, 4} (low res). load 10m histogram_stddev_stdvar_1 {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}}x1 @@ -300,6 +318,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_1) eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_1) {} 1.163807968526718 +clear + # Apply stddev and stdvar function to histogram with {1, 1, 1, 1} (high res). 
load 10m histogram_stddev_stdvar_2 {{schema:8 count:10 sum:10 buckets:[1 2 3 4]}}x1 @@ -310,6 +330,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_2) eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_2) {} 2.3971123370139447e-05 +clear + # Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9}. load 10m histogram_stddev_stdvar_3 {{schema:3 count:7 sum:62 z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1 @@ -320,6 +342,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_3) eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_3) {} 1844.4651144196398 +clear + # Apply stddev and stdvar function to histogram with {-100000, -10000, -1000, -888, -888, -100, -50, -9, -8, -3}. load 10m histogram_stddev_stdvar_4 {{schema:0 count:10 sum:-112946 z_bucket:0 n_buckets:[0 0 1 1 1 0 1 1 0 0 3 0 0 0 1 0 0 1]}}x1 @@ -330,6 +354,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_4) eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_4) {} 759352122.1939945 +clear + # Apply stddev and stdvar function to histogram with {-10x10}. load 10m histogram_stddev_stdvar_5 {{schema:0 count:10 sum:-100 z_bucket:0 n_buckets:[0 0 0 0 10]}}x1 @@ -340,6 +366,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_5) eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_5) {} 1.725830020304794 +clear + # Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, NaN}. load 10m histogram_stddev_stdvar_6 {{schema:3 count:7 sum:NaN z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1 @@ -350,6 +378,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_6) eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_6) {} NaN +clear + # Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, Inf}. load 10m histogram_stddev_stdvar_7 {{schema:3 count:7 sum:Inf z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1 @@ -360,6 +390,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_7) eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_7) {} Inf +clear + # Apply quantile function to histogram with all positive buckets with zero bucket. load 10m histogram_quantile_1 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1 @@ -370,20 +402,24 @@ eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_1) eval instant at 10m histogram_quantile(1, histogram_quantile_1) {} 16 +# The following quantiles are within a bucket. Exponential +# interpolation is applied (rather than linear, as it is done for +# classic histograms), leading to slightly different quantile values. 
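As a concrete check of the interpolation comment above: for histogram_quantile(0.9, histogram_quantile_1) the rank 0.9*12 = 10.8 falls into the (8, 16] bucket, with 8 observations below it (zero bucket 2 plus 2+3+0+1) and 4 inside it, so the within-bucket fraction is 0.7 and exponential interpolation gives 8 * 2^0.7 ≈ 12.996, the value expected below. The constants in this sketch are read off the fixture:

package main

import (
	"fmt"
	"math"
)

func main() {
	const (
		count        = 12.0 // total observations in histogram_quantile_1
		belowBucket  = 8.0  // zero bucket (2) plus the (0.5,1], (1,2], (2,4], (4,8] buckets (2+3+0+1)
		inBucket     = 4.0  // observations in the (8, 16] bucket
		lower, upper = 8.0, 16.0
	)
	rank := 0.9 * count
	frac := (rank - belowBucket) / inBucket // 0.7
	fmt.Printf("%.6f\n", lower*math.Pow(upper/lower, frac))
	// 12.996038, cf. the 0.9 quantile expectation below
}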
eval instant at 10m histogram_quantile(0.99, histogram_quantile_1) - {} 15.759999999999998 + {} 15.67072476139083 eval instant at 10m histogram_quantile(0.9, histogram_quantile_1) - {} 13.600000000000001 + {} 12.99603834169977 eval instant at 10m histogram_quantile(0.6, histogram_quantile_1) - {} 4.799999999999997 + {} 4.594793419988138 eval instant at 10m histogram_quantile(0.5, histogram_quantile_1) - {} 1.6666666666666665 + {} 1.5874010519681994 +# Linear interpolation within the zero bucket after all. eval instant at 10m histogram_quantile(0.1, histogram_quantile_1) - {} 0.0006000000000000001 + {} 0.0006 eval instant at 10m histogram_quantile(0, histogram_quantile_1) {} 0 @@ -391,6 +427,8 @@ eval instant at 10m histogram_quantile(0, histogram_quantile_1) eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_1) {} -Inf +clear + # Apply quantile function to histogram with all negative buckets with zero bucket. load 10m histogram_quantile_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1 @@ -401,17 +439,20 @@ eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_2) eval instant at 10m histogram_quantile(1, histogram_quantile_2) {} 0 +# Again, the quantile values here are slightly different from what +# they would be with linear interpolation. Note that quantiles +# ending up in the zero bucket are linearly interpolated after all. eval instant at 10m histogram_quantile(0.99, histogram_quantile_2) - {} -6.000000000000048e-05 + {} -0.00006 eval instant at 10m histogram_quantile(0.9, histogram_quantile_2) - {} -0.0005999999999999996 + {} -0.0006 eval instant at 10m histogram_quantile(0.5, histogram_quantile_2) - {} -1.6666666666666667 + {} -1.5874010519681996 eval instant at 10m histogram_quantile(0.1, histogram_quantile_2) - {} -13.6 + {} -12.996038341699768 eval instant at 10m histogram_quantile(0, histogram_quantile_2) {} -16 @@ -419,7 +460,11 @@ eval instant at 10m histogram_quantile(0, histogram_quantile_2) eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_2) {} -Inf -# Apply quantile function to histogram with both positive and negative buckets with zero bucket. +clear + +# Apply quantile function to histogram with both positive and negative +# buckets with zero bucket. +# First positive buckets with exponential interpolation. load 10m histogram_quantile_3 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 @@ -430,31 +475,34 @@ eval instant at 10m histogram_quantile(1, histogram_quantile_3) {} 16 eval instant at 10m histogram_quantile(0.99, histogram_quantile_3) - {} 15.519999999999996 + {} 15.34822590920423 eval instant at 10m histogram_quantile(0.9, histogram_quantile_3) - {} 11.200000000000003 + {} 10.556063286183155 eval instant at 10m histogram_quantile(0.7, histogram_quantile_3) - {} 1.2666666666666657 + {} 1.2030250360821164 +# Linear interpolation in the zero bucket, symmetrically centered around +# the zero point. eval instant at 10m histogram_quantile(0.55, histogram_quantile_3) - {} 0.0006000000000000005 + {} 0.0006 eval instant at 10m histogram_quantile(0.5, histogram_quantile_3) {} 0 eval instant at 10m histogram_quantile(0.45, histogram_quantile_3) - {} -0.0005999999999999996 + {} -0.0006 +# Finally negative buckets with mirrored exponential interpolation. 
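The symmetric zero-bucket case above can be verified directly: histogram_quantile_3 holds 10 negative observations, 4 in the zero bucket spanning [-0.001, 0.001], and 10 positive ones, so rank 0.55*24 = 13.2 sits at fraction 0.8 of the zero bucket and linear interpolation gives -0.001 + 0.8*0.002 = 0.0006, with 0.45 mirroring to -0.0006. A short check with constants taken from the fixture:

package main

import "fmt"

func main() {
	const (
		total     = 24.0  // histogram_quantile_3: 10 negative + 4 zero-bucket + 10 positive
		negBelow  = 10.0  // observations below the zero bucket
		zeroCount = 4.0   // observations inside the zero bucket
		zeroWidth = 0.001 // zero bucket spans [-0.001, 0.001]
	)
	for _, q := range []float64{0.55, 0.45} {
		rank := q * total
		frac := (rank - negBelow) / zeroCount
		fmt.Printf("%.4f\n", -zeroWidth+frac*2*zeroWidth) // 0.0006, then -0.0006
	}
}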
eval instant at 10m histogram_quantile(0.3, histogram_quantile_3) - {} -1.266666666666667 + {} -1.2030250360821169 eval instant at 10m histogram_quantile(0.1, histogram_quantile_3) - {} -11.2 + {} -10.556063286183155 eval instant at 10m histogram_quantile(0.01, histogram_quantile_3) - {} -15.52 + {} -15.34822590920423 eval instant at 10m histogram_quantile(0, histogram_quantile_3) {} -16 @@ -462,6 +510,92 @@ eval instant at 10m histogram_quantile(0, histogram_quantile_3) eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_3) {} -Inf +clear + +# Try different schemas. (The interpolation logic must not depend on the schema.) +clear +load 1m + var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 buckets:[0 5]}} + var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 buckets:[0 5]}} + var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 buckets:[0 5]}} + +eval instant at 1m histogram_quantile(0.5, var_res_histogram) + {schema="-1"} 2.0 + {schema="0"} 1.4142135623730951 + {schema="+1"} 1.189207 + +eval instant at 1m histogram_fraction(0, 2, var_res_histogram{schema="-1"}) + {schema="-1"} 0.5 + +eval instant at 1m histogram_fraction(0, 1.4142135623730951, var_res_histogram{schema="0"}) + {schema="0"} 0.5 + +eval instant at 1m histogram_fraction(0, 1.189207, var_res_histogram{schema="+1"}) + {schema="+1"} 0.5 + +# The same as above, but one bucket "further to the right". +clear +load 1m + var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 buckets:[0 0 5]}} + var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 buckets:[0 0 5]}} + var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 buckets:[0 0 5]}} + +eval instant at 1m histogram_quantile(0.5, var_res_histogram) + {schema="-1"} 8.0 + {schema="0"} 2.82842712474619 + {schema="+1"} 1.6817928305074292 + +eval instant at 1m histogram_fraction(0, 8, var_res_histogram{schema="-1"}) + {schema="-1"} 0.5 + +eval instant at 1m histogram_fraction(0, 2.82842712474619, var_res_histogram{schema="0"}) + {schema="0"} 0.5 + +eval instant at 1m histogram_fraction(0, 1.6817928305074292, var_res_histogram{schema="+1"}) + {schema="+1"} 0.5 + +# And everything again but for negative buckets. 
+clear +load 1m + var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 n_buckets:[0 5]}} + var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 n_buckets:[0 5]}} + var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 n_buckets:[0 5]}} + +eval instant at 1m histogram_quantile(0.5, var_res_histogram) + {schema="-1"} -2.0 + {schema="0"} -1.4142135623730951 + {schema="+1"} -1.189207 + +eval instant at 1m histogram_fraction(-2, 0, var_res_histogram{schema="-1"}) + {schema="-1"} 0.5 + +eval instant at 1m histogram_fraction(-1.4142135623730951, 0, var_res_histogram{schema="0"}) + {schema="0"} 0.5 + +eval instant at 1m histogram_fraction(-1.189207, 0, var_res_histogram{schema="+1"}) + {schema="+1"} 0.5 + +clear +load 1m + var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 n_buckets:[0 0 5]}} + var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 n_buckets:[0 0 5]}} + var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 n_buckets:[0 0 5]}} + +eval instant at 1m histogram_quantile(0.5, var_res_histogram) + {schema="-1"} -8.0 + {schema="0"} -2.82842712474619 + {schema="+1"} -1.6817928305074292 + +eval instant at 1m histogram_fraction(-8, 0, var_res_histogram{schema="-1"}) + {schema="-1"} 0.5 + +eval instant at 1m histogram_fraction(-2.82842712474619, 0, var_res_histogram{schema="0"}) + {schema="0"} 0.5 + +eval instant at 1m histogram_fraction(-1.6817928305074292, 0, var_res_histogram{schema="+1"}) + {schema="+1"} 0.5 + + # Apply fraction function to empty histogram. load 10m histogram_fraction_1 {{}}x1 @@ -469,6 +603,8 @@ load 10m eval instant at 10m histogram_fraction(3.1415, 42, histogram_fraction_1) {} NaN +clear + # Apply fraction function to histogram with positive and zero buckets. load 10m histogram_fraction_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1 @@ -485,11 +621,18 @@ eval instant at 10m histogram_fraction(-0.001, 0, histogram_fraction_2) eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_2) {} 0.16666666666666666 +# Note that this result and the one above add up to 1. +eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_2) + {} 0.8333333333333334 + +# We are in the zero bucket, resulting in linear interpolation eval instant at 10m histogram_fraction(0, 0.0005, histogram_fraction_2) {} 0.08333333333333333 -eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_2) - {} 0.8333333333333334 +# Demonstrate that the inverse operation with histogram_quantile yields +# the original value with the non-trivial result above. +eval instant at 10m histogram_quantile(0.08333333333333333, histogram_fraction_2) + {} 0.0005 eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_2) {} 0 @@ -497,17 +640,30 @@ eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_2) eval instant at 10m histogram_fraction(1, 2, histogram_fraction_2) {} 0.25 +# More non-trivial results with interpolation involved below, including +# some round-trips via histogram_quantile to prove that the inverse +# operation leads to the same results. 
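One of the round-trips below can be reproduced with the same geometric rule applied in reverse: for histogram_fraction(0, 6, histogram_fraction_2) the boundary 6 falls inside the (4, 8] bucket, the share of that bucket below 6 is ln(6/4)/ln(8/4) ≈ 0.585, and adding the zero bucket plus the lower buckets gives (2+2+3+0+0.585)/12 ≈ 0.63208, matching both the fraction expectation and its histogram_quantile inverse. A short check, with constants read off the fixture above:

package main

import (
	"fmt"
	"math"
)

func main() {
	const (
		total      = 12.0 // observations in histogram_fraction_2
		below      = 7.0  // zero bucket (2) + (0.5,1] (2) + (1,2] (3) + (2,4] (0)
		inBucket   = 1.0  // observations in (4, 8]
		lo, hi, at = 4.0, 8.0, 6.0
	)
	share := math.Log(at/lo) / math.Log(hi/lo) // geometric share of (4, 8] below 6
	fmt.Printf("%.10f\n", (below+inBucket*share)/total)
	// 0.6320802084, cf. the 0.6320802083934297 expected below
}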
+ +eval instant at 10m histogram_fraction(0, 1.5, histogram_fraction_2) + {} 0.4795739585136224 + eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_2) - {} 0.125 + {} 0.10375937481971091 eval instant at 10m histogram_fraction(1, 8, histogram_fraction_2) {} 0.3333333333333333 +eval instant at 10m histogram_fraction(0, 6, histogram_fraction_2) + {} 0.6320802083934297 + +eval instant at 10m histogram_quantile(0.6320802083934297, histogram_fraction_2) + {} 6 + eval instant at 10m histogram_fraction(1, 6, histogram_fraction_2) - {} 0.2916666666666667 + {} 0.29874687506009634 eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_2) - {} 0.16666666666666666 + {} 0.15250624987980724 eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_2) {} 0 @@ -570,6 +726,12 @@ eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_3) eval instant at 10m histogram_fraction(-0.0005, 0, histogram_fraction_3) {} 0.08333333333333333 +eval instant at 10m histogram_fraction(-inf, -0.0005, histogram_fraction_3) + {} 0.9166666666666666 + +eval instant at 10m histogram_quantile(0.9166666666666666, histogram_fraction_3) + {} -0.0005 + eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_3) {} 0 @@ -595,16 +757,22 @@ eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_3) {} 0.25 eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_3) - {} 0.125 + {} 0.10375937481971091 eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_3) {} 0.3333333333333333 +eval instant at 10m histogram_fraction(-inf, -6, histogram_fraction_3) + {} 0.36791979160657035 + +eval instant at 10m histogram_quantile(0.36791979160657035, histogram_fraction_3) + {} -6 + eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_3) - {} 0.2916666666666667 + {} 0.29874687506009634 eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_3) - {} 0.16666666666666666 + {} 0.15250624987980724 eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_3) {} 0 @@ -633,6 +801,8 @@ eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_3) eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_3) {} 1 +clear + # Apply fraction function to histogram with both positive, negative and zero buckets. 
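Before that, a worked check of the exponentially interpolated fractions above (a sketch, standard library only): the (1,2] bucket of histogram_fraction_2 holds 3 of 12 observations, and on the log2 scale 1.5 sits log2(1.5) of the way through that bucket; the (4,8] bucket contributes the log2(6/4) share of its single observation to the (1, 6) range. The negative mirror, histogram_fraction_3, yields the same numbers with the signs flipped.

package main

import (
	"fmt"
	"math"
)

func main() {
	const total = 12.0 // observations in histogram_fraction_2

	// histogram_fraction(1.5, 2, ...): the share of the (1,2] bucket (3 obs)
	// above 1.5, interpolated exponentially, i.e. linearly in log2 space.
	above := (math.Log2(2) - math.Log2(1.5)) / (math.Log2(2) - math.Log2(1))
	fmt.Println(3 / total * above) // ≈ 0.10375937481971091

	// histogram_fraction(1, 6, ...): all of (1,2] (3 obs) and (2,4] (0 obs),
	// plus the share of (4,8] (1 obs) below 6.
	below6 := (math.Log2(6) - math.Log2(4)) / (math.Log2(8) - math.Log2(4))
	fmt.Println((3 + 0 + 1*below6) / total) // ≈ 0.29874687506009634
}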
load 10m histogram_fraction_4 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 @@ -652,6 +822,18 @@ eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_4) eval instant at 10m histogram_fraction(-0.0005, 0.0005, histogram_fraction_4) {} 0.08333333333333333 +eval instant at 10m histogram_fraction(-inf, 0.0005, histogram_fraction_4) + {} 0.5416666666666666 + +eval instant at 10m histogram_quantile(0.5416666666666666, histogram_fraction_4) + {} 0.0005 + +eval instant at 10m histogram_fraction(-inf, -0.0005, histogram_fraction_4) + {} 0.4583333333333333 + +eval instant at 10m histogram_quantile(0.4583333333333333, histogram_fraction_4) + {} -0.0005 + eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_4) {} 0.4166666666666667 @@ -662,31 +844,31 @@ eval instant at 10m histogram_fraction(1, 2, histogram_fraction_4) {} 0.125 eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_4) - {} 0.0625 + {} 0.051879687409855414 eval instant at 10m histogram_fraction(1, 8, histogram_fraction_4) {} 0.16666666666666666 eval instant at 10m histogram_fraction(1, 6, histogram_fraction_4) - {} 0.14583333333333334 + {} 0.14937343753004825 eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_4) - {} 0.08333333333333333 + {} 0.07625312493990366 eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_4) {} 0.125 eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_4) - {} 0.0625 + {} 0.051879687409855456 eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_4) {} 0.16666666666666666 eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_4) - {} 0.14583333333333334 + {} 0.14937343753004817 eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_4) - {} 0.08333333333333333 + {} 0.07625312493990362 eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_4) {} 0 @@ -766,18 +948,40 @@ eval instant at 10m histogram_mul_div*float_series_0 eval instant at 10m float_series_0*histogram_mul_div {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}} -# TODO: (NeerajGartia21) remove all the histogram buckets in case of division with zero. See: https://github.com/prometheus/prometheus/issues/13934 eval instant at 10m histogram_mul_div/0 - {} {{schema:0 count:Inf sum:Inf z_bucket:Inf z_bucket_w:0.001 buckets:[Inf Inf Inf] n_buckets:[Inf Inf Inf]}} + {} {{schema:0 count:Inf sum:Inf z_bucket_w:0.001 z_bucket:Inf}} eval instant at 10m histogram_mul_div/float_series_0 - {} {{schema:0 count:Inf sum:Inf z_bucket:Inf z_bucket_w:0.001 buckets:[Inf Inf Inf] n_buckets:[Inf Inf Inf]}} + {} {{schema:0 count:Inf sum:Inf z_bucket_w:0.001 z_bucket:Inf}} eval instant at 10m histogram_mul_div*0/0 - {} {{schema:0 count:NaN sum:NaN z_bucket:NaN z_bucket_w:0.001 buckets:[NaN NaN NaN] n_buckets:[NaN NaN NaN]}} + {} {{schema:0 count:NaN sum:NaN z_bucket_w:0.001 z_bucket:NaN}} + +eval_info instant at 10m histogram_mul_div*histogram_mul_div + +eval_info instant at 10m histogram_mul_div/histogram_mul_div + +eval_info instant at 10m float_series_3/histogram_mul_div + +eval_info instant at 10m 0/histogram_mul_div clear +# Apply binary operators to mixed histogram and float samples. +# TODO:(NeerajGartia21) move these tests to their respective locations when tests from engine_test.go are be moved here. 
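Before the mixed-sample cases, a worked check of the symmetric histogram_fraction_4 expectations above (a sketch, standard library only): 24 observations split into 10 negative, 4 in the zero bucket [-0.001, 0.001] and 10 positive, so everything up to 0.0005 is the 10 negative observations plus three quarters of the zero bucket.

package main

import "fmt"

func main() {
	const (
		total     = 24.0
		negative  = 10.0 // sum of n_buckets
		zeroCount = 4.0  // z_bucket
		zeroLower = -0.001
		zeroUpper = 0.001
	)
	// Everything below 0.0005: all negative observations plus, by linear
	// interpolation, three quarters of the zero bucket [-0.001, 0.001].
	inZero := (0.0005 - zeroLower) / (zeroUpper - zeroLower) * zeroCount
	fmt.Println((negative + inZero) / total) // 0.5416666666666666
	// Mirrored: histogram_fraction(-inf, -0.0005, ...) keeps only a quarter of
	// the zero bucket, i.e. (10 + 1) / 24 = 0.4583333333333333.
}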
+ +load 10m + histogram_sample {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 + float_sample 0x1 + +eval_info instant at 10m float_sample+histogram_sample + +eval_info instant at 10m histogram_sample+float_sample + +eval_info instant at 10m float_sample-histogram_sample + +eval_info instant at 10m histogram_sample-float_sample + # Counter reset only noticeable in a single bucket. load 5m reset_in_bucket {{schema:0 count:4 sum:5 buckets:[1 2 1]}} {{schema:0 count:5 sum:6 buckets:[1 1 3]}} {{schema:0 count:6 sum:7 buckets:[1 2 3]}} @@ -814,7 +1018,7 @@ load 30s some_metric {{schema:0 sum:1 count:1 buckets:[1] counter_reset_hint:gauge}} {{schema:0 sum:2 count:2 buckets:[2] counter_reset_hint:gauge}} {{schema:0 sum:3 count:3 buckets:[3] counter_reset_hint:gauge}} # Test the case where we only have two points for rate -eval_warn instant at 30s rate(some_metric[30s]) +eval_warn instant at 30s rate(some_metric[1m]) {} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}} # Test the case where we have more than two points for rate @@ -836,11 +1040,11 @@ eval_warn instant at 1m30s rate(some_metric[1m]) # Should produce no results. # Start with custom, end with exponential. -eval_warn instant at 1m rate(some_metric[30s]) +eval_warn instant at 1m rate(some_metric[1m]) # Should produce no results. # Start with exponential, end with custom. -eval_warn instant at 30s rate(some_metric[30s]) +eval_warn instant at 30s rate(some_metric[1m]) # Should produce no results. clear @@ -975,8 +1179,8 @@ clear load 1m histogram_sum_over_time {{schema:0 count:25 sum:1234.5 z_bucket:4 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[2 4 0 0 1 9]}} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}} {{schema:0 count:41 sum:1111.1 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}} {{schema:1 count:0}} -eval instant at 3m sum_over_time(histogram_sum_over_time[3m:1m]) +eval instant at 3m sum_over_time(histogram_sum_over_time[4m:1m]) {} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}} -eval instant at 3m avg_over_time(histogram_sum_over_time[3m:1m]) +eval instant at 3m avg_over_time(histogram_sum_over_time[4m:1m]) {} {{schema:0 count:26.75 sum:1172.8 z_bucket:3.5 z_bucket_w:0.001 buckets:[0.75 2 0.5 1.25 0.75 0.5 0.5] n_buckets:[0.5 1.5 2 1 3.75 2.25 0 0 0 2.5 2.5 1]}} diff --git a/promql/promqltest/testdata/operators.test b/promql/promqltest/testdata/operators.test index df2311b9bae..645cca88b85 100644 --- a/promql/promqltest/testdata/operators.test +++ b/promql/promqltest/testdata/operators.test @@ -113,7 +113,7 @@ eval instant at 50m http_requests{job="api-server", group="canary"} http_requests{group="canary", instance="0", job="api-server"} 300 http_requests{group="canary", instance="1", job="api-server"} 400 -eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[5m]) * 5 * 60 +eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[10m]) * 5 * 60 {group="canary", instance="0", job="api-server"} 330 {group="canary", instance="1", job="api-server"} 440 @@ -308,65 +308,65 @@ load 5m threshold{instance="abc",job="node",target="a@b.com"} 0 # Copy machine role to node variable. 
-eval instant at 5m node_role * on (instance) group_right (role) node_var +eval instant at 1m node_role * on (instance) group_right (role) node_var {instance="abc",job="node",role="prometheus"} 2 -eval instant at 5m node_var * on (instance) group_left (role) node_role +eval instant at 1m node_var * on (instance) group_left (role) node_role {instance="abc",job="node",role="prometheus"} 2 -eval instant at 5m node_var * ignoring (role) group_left (role) node_role +eval instant at 1m node_var * ignoring (role) group_left (role) node_role {instance="abc",job="node",role="prometheus"} 2 -eval instant at 5m node_role * ignoring (role) group_right (role) node_var +eval instant at 1m node_role * ignoring (role) group_right (role) node_var {instance="abc",job="node",role="prometheus"} 2 # Copy machine role to node variable with instrumentation labels. -eval instant at 5m node_cpu * ignoring (role, mode) group_left (role) node_role +eval instant at 1m node_cpu * ignoring (role, mode) group_left (role) node_role {instance="abc",job="node",mode="idle",role="prometheus"} 3 {instance="abc",job="node",mode="user",role="prometheus"} 1 -eval instant at 5m node_cpu * on (instance) group_left (role) node_role +eval instant at 1m node_cpu * on (instance) group_left (role) node_role {instance="abc",job="node",mode="idle",role="prometheus"} 3 {instance="abc",job="node",mode="user",role="prometheus"} 1 # Ratio of total. -eval instant at 5m node_cpu / on (instance) group_left sum by (instance,job)(node_cpu) +eval instant at 1m node_cpu / on (instance) group_left sum by (instance,job)(node_cpu) {instance="abc",job="node",mode="idle"} .75 {instance="abc",job="node",mode="user"} .25 {instance="def",job="node",mode="idle"} .80 {instance="def",job="node",mode="user"} .20 -eval instant at 5m sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu) +eval instant at 1m sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu) {job="node",mode="idle"} 0.7857142857142857 {job="node",mode="user"} 0.21428571428571427 -eval instant at 5m sum(sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu)) +eval instant at 1m sum(sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu)) {} 1.0 -eval instant at 5m node_cpu / ignoring (mode) group_left sum without (mode)(node_cpu) +eval instant at 1m node_cpu / ignoring (mode) group_left sum without (mode)(node_cpu) {instance="abc",job="node",mode="idle"} .75 {instance="abc",job="node",mode="user"} .25 {instance="def",job="node",mode="idle"} .80 {instance="def",job="node",mode="user"} .20 -eval instant at 5m node_cpu / ignoring (mode) group_left(dummy) sum without (mode)(node_cpu) +eval instant at 1m node_cpu / ignoring (mode) group_left(dummy) sum without (mode)(node_cpu) {instance="abc",job="node",mode="idle"} .75 {instance="abc",job="node",mode="user"} .25 {instance="def",job="node",mode="idle"} .80 {instance="def",job="node",mode="user"} .20 -eval instant at 5m sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu) +eval instant at 1m sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu) {job="node",mode="idle"} 0.7857142857142857 {job="node",mode="user"} 0.21428571428571427 -eval instant at 5m sum(sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu)) +eval instant at 1m sum(sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu)) {} 
1.0 # Copy over label from metric with no matching labels, without having to list cross-job target labels ('job' here). -eval instant at 5m node_cpu + on(dummy) group_left(foo) random*0 +eval instant at 1m node_cpu + on(dummy) group_left(foo) random*0 {instance="abc",job="node",mode="idle",foo="bar"} 3 {instance="abc",job="node",mode="user",foo="bar"} 1 {instance="def",job="node",mode="idle",foo="bar"} 8 @@ -374,12 +374,12 @@ eval instant at 5m node_cpu + on(dummy) group_left(foo) random*0 # Use threshold from metric, and copy over target. -eval instant at 5m node_cpu > on(job, instance) group_left(target) threshold +eval instant at 1m node_cpu > on(job, instance) group_left(target) threshold node_cpu{instance="abc",job="node",mode="idle",target="a@b.com"} 3 node_cpu{instance="abc",job="node",mode="user",target="a@b.com"} 1 # Use threshold from metric, and a default (1) if it's not present. -eval instant at 5m node_cpu > on(job, instance) group_left(target) (threshold or on (job, instance) (sum by (job, instance)(node_cpu) * 0 + 1)) +eval instant at 1m node_cpu > on(job, instance) group_left(target) (threshold or on (job, instance) (sum by (job, instance)(node_cpu) * 0 + 1)) node_cpu{instance="abc",job="node",mode="idle",target="a@b.com"} 3 node_cpu{instance="abc",job="node",mode="user",target="a@b.com"} 1 node_cpu{instance="def",job="node",mode="idle"} 8 @@ -387,37 +387,37 @@ eval instant at 5m node_cpu > on(job, instance) group_left(target) (threshold or # Check that binops drop the metric name. -eval instant at 5m node_cpu + 2 +eval instant at 1m node_cpu + 2 {instance="abc",job="node",mode="idle"} 5 {instance="abc",job="node",mode="user"} 3 {instance="def",job="node",mode="idle"} 10 {instance="def",job="node",mode="user"} 4 -eval instant at 5m node_cpu - 2 +eval instant at 1m node_cpu - 2 {instance="abc",job="node",mode="idle"} 1 {instance="abc",job="node",mode="user"} -1 {instance="def",job="node",mode="idle"} 6 {instance="def",job="node",mode="user"} 0 -eval instant at 5m node_cpu / 2 +eval instant at 1m node_cpu / 2 {instance="abc",job="node",mode="idle"} 1.5 {instance="abc",job="node",mode="user"} 0.5 {instance="def",job="node",mode="idle"} 4 {instance="def",job="node",mode="user"} 1 -eval instant at 5m node_cpu * 2 +eval instant at 1m node_cpu * 2 {instance="abc",job="node",mode="idle"} 6 {instance="abc",job="node",mode="user"} 2 {instance="def",job="node",mode="idle"} 16 {instance="def",job="node",mode="user"} 4 -eval instant at 5m node_cpu ^ 2 +eval instant at 1m node_cpu ^ 2 {instance="abc",job="node",mode="idle"} 9 {instance="abc",job="node",mode="user"} 1 {instance="def",job="node",mode="idle"} 64 {instance="def",job="node",mode="user"} 4 -eval instant at 5m node_cpu % 2 +eval instant at 1m node_cpu % 2 {instance="abc",job="node",mode="idle"} 1 {instance="abc",job="node",mode="user"} 1 {instance="def",job="node",mode="idle"} 0 @@ -432,14 +432,14 @@ load 5m metricB{baz="meh"} 4 # On with no labels, for metrics with no common labels. -eval instant at 5m random + on() metricA +eval instant at 1m random + on() metricA {} 5 # Ignoring with no labels is the same as no ignoring. 
-eval instant at 5m metricA + ignoring() metricB +eval instant at 1m metricA + ignoring() metricB {baz="meh"} 7 -eval instant at 5m metricA + metricB +eval instant at 1m metricA + metricB {baz="meh"} 7 clear @@ -457,16 +457,16 @@ load 5m test_total{instance="localhost"} 50 test_smaller{instance="localhost"} 10 -eval instant at 5m test_total > bool test_smaller +eval instant at 1m test_total > bool test_smaller {instance="localhost"} 1 -eval instant at 5m test_total > test_smaller +eval instant at 1m test_total > test_smaller test_total{instance="localhost"} 50 -eval instant at 5m test_total < bool test_smaller +eval instant at 1m test_total < bool test_smaller {instance="localhost"} 0 -eval instant at 5m test_total < test_smaller +eval instant at 1m test_total < test_smaller clear @@ -476,14 +476,14 @@ load 5m trigx{} 20 trigNaN{} NaN -eval instant at 5m trigy atan2 trigx +eval instant at 1m trigy atan2 trigx {} 0.4636476090008061 -eval instant at 5m trigy atan2 trigNaN +eval instant at 1m trigy atan2 trigNaN {} NaN -eval instant at 5m 10 atan2 20 +eval instant at 1m 10 atan2 20 0.4636476090008061 -eval instant at 5m 10 atan2 NaN +eval instant at 1m 10 atan2 NaN NaN diff --git a/promql/promqltest/testdata/range_queries.test b/promql/promqltest/testdata/range_queries.test index e6951096026..3bfe2ce4cb3 100644 --- a/promql/promqltest/testdata/range_queries.test +++ b/promql/promqltest/testdata/range_queries.test @@ -1,18 +1,18 @@ # sum_over_time with all values -load 30s +load 15s bar 0 1 10 100 1000 -eval range from 0 to 2m step 1m sum_over_time(bar[30s]) +eval range from 0 to 1m step 30s sum_over_time(bar[30s]) {} 0 11 1100 clear # sum_over_time with trailing values -load 30s +load 15s bar 0 1 10 100 1000 0 0 0 0 eval range from 0 to 2m step 1m sum_over_time(bar[30s]) - {} 0 11 1100 + {} 0 1100 0 clear @@ -21,15 +21,15 @@ load 30s bar 0 1 10 100 1000 10000 100000 1000000 10000000 eval range from 0 to 4m step 1m sum_over_time(bar[30s]) - {} 0 11 1100 110000 11000000 + {} 0 10 1000 100000 10000000 clear # sum_over_time with all values random -load 30s +load 15s bar 5 17 42 2 7 905 51 -eval range from 0 to 3m step 1m sum_over_time(bar[30s]) +eval range from 0 to 90s step 30s sum_over_time(bar[30s]) {} 5 59 9 956 clear diff --git a/promql/promqltest/testdata/staleness.test b/promql/promqltest/testdata/staleness.test index 4fdbc997b7f..a48473d4398 100644 --- a/promql/promqltest/testdata/staleness.test +++ b/promql/promqltest/testdata/staleness.test @@ -14,10 +14,10 @@ eval instant at 40s metric {__name__="metric"} 2 # It goes stale 5 minutes after the last sample. -eval instant at 330s metric +eval instant at 329s metric {__name__="metric"} 2 -eval instant at 331s metric +eval instant at 330s metric # Range vector ignores stale sample. 
@@ -30,9 +30,13 @@ eval instant at 10s count_over_time(metric[1s]) eval instant at 20s count_over_time(metric[1s]) eval instant at 20s count_over_time(metric[10s]) + +eval instant at 20s count_over_time(metric[20s]) {} 1 eval instant at 20s count_over_time(metric[10]) + +eval instant at 20s count_over_time(metric[20]) {} 1 @@ -48,7 +52,7 @@ eval instant at 0s metric eval instant at 150s metric {__name__="metric"} 0 -eval instant at 300s metric +eval instant at 299s metric {__name__="metric"} 0 -eval instant at 301s metric +eval instant at 300s metric diff --git a/promql/promqltest/testdata/subquery.test b/promql/promqltest/testdata/subquery.test index 1d338d97642..3ac547a2b57 100644 --- a/promql/promqltest/testdata/subquery.test +++ b/promql/promqltest/testdata/subquery.test @@ -10,18 +10,18 @@ eval instant at 10s sum_over_time(metric[50s:5s]) # Every evaluation yields the last value, i.e. 2 eval instant at 5m sum_over_time(metric[50s:10s]) - {} 12 + {} 10 # Series becomes stale at 5m10s (5m after last sample) -# Hence subquery gets a single sample at 6m-50s=5m10s. -eval instant at 6m sum_over_time(metric[50s:10s]) +# Hence subquery gets a single sample at 5m10s. +eval instant at 5m59s sum_over_time(metric[60s:10s]) {} 2 eval instant at 10s rate(metric[20s:10s]) {} 0.1 eval instant at 20s rate(metric[20s:5s]) - {} 0.05 + {} 0.06666666666666667 clear @@ -49,16 +49,16 @@ load 10s metric3 0+3x1000 eval instant at 1000s sum_over_time(metric1[30s:10s]) - {} 394 + {} 297 -# This is (394*2 - 100), because other than the last 100 at 1000s, +# This is (97 + 98*2 + 99*2 + 100), because other than 97@975s and 100@1000s, # everything else is repeated with the 5s step. eval instant at 1000s sum_over_time(metric1[30s:5s]) - {} 688 + {} 591 -# Offset is aligned with the step. +# Offset is aligned with the step, so this is from [98@980s, 99@990s, 100@1000s]. eval instant at 1010s sum_over_time(metric1[30s:10s] offset 10s) - {} 394 + {} 297 # Same result for different offsets due to step alignment. 
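As a quick sanity check of the two subquery sums above (a sketch, standard library only): with a 10s step the subquery at 1000s evaluates metric1 at 980s, 990s and 1000s, while the 5s step adds 975s and repeats the 10s-aligned points, exactly as the comment spells out.

package main

import "fmt"

func main() {
	sum10s := 98 + 99 + 100         // metric1 at 980s, 990s, 1000s
	sum5s := 97 + 98*2 + 99*2 + 100 // 97@975s and 100@1000s once, the rest twice
	fmt.Println(sum10s, sum5s)      // 297 591
}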
eval instant at 1010s sum_over_time(metric1[30s:10s] offset 9s) @@ -93,16 +93,16 @@ eval instant at 1010s sum_over_time((metric1)[30:10] offset 3) # Nested subqueries eval instant at 1000s rate(sum_over_time(metric1[30s:10s])[50s:10s]) - {} 0.4 + {} 0.30000000000000004 eval instant at 1000s rate(sum_over_time(metric2[30s:10s])[50s:10s]) - {} 0.8 + {} 0.6000000000000001 eval instant at 1000s rate(sum_over_time(metric3[30s:10s])[50s:10s]) - {} 1.2 + {} 0.9 eval instant at 1000s rate(sum_over_time((metric1+metric2+metric3)[30s:10s])[30s:10s]) - {} 2.4 + {} 1.8 clear @@ -115,16 +115,20 @@ load 7s eval instant at 80s rate(metric[1m]) {} 2.517857143 -# No extrapolation, [2@20, 144@80]: (144 - 2) / 60 -eval instant at 80s rate(metric[1m:10s]) - {} 2.366666667 +# Extrapolated to range start for counter, [2@20, 144@80]: (144 - 2) / (80 - 20) +eval instant at 80s rate(metric[1m500ms:10s]) + {} 2.3666666666666667 + +# Extrapolated to zero value for counter, [2@20, 144@80]: (144 - 0) / 61 +eval instant at 80s rate(metric[1m1s:10s]) + {} 2.360655737704918 # Only one value between 10s and 20s, 2@14 eval instant at 20s min_over_time(metric[10s]) {} 2 -# min(1@10, 2@20) -eval instant at 20s min_over_time(metric[10s:10s]) +# min(2@20) +eval instant at 20s min_over_time(metric[15s:10s]) {} 1 eval instant at 20m min_over_time(rate(metric[5m])[20m:1m]) diff --git a/promql/promqltest/testdata/trig_functions.test b/promql/promqltest/testdata/trig_functions.test index fa5f94651b6..036621193d7 100644 --- a/promql/promqltest/testdata/trig_functions.test +++ b/promql/promqltest/testdata/trig_functions.test @@ -5,92 +5,92 @@ load 5m trig{l="y"} 20 trig{l="NaN"} NaN -eval instant at 5m sin(trig) +eval instant at 1m sin(trig) {l="x"} -0.5440211108893699 {l="y"} 0.9129452507276277 {l="NaN"} NaN -eval instant at 5m cos(trig) +eval instant at 1m cos(trig) {l="x"} -0.8390715290764524 {l="y"} 0.40808206181339196 {l="NaN"} NaN -eval instant at 5m tan(trig) +eval instant at 1m tan(trig) {l="x"} 0.6483608274590867 {l="y"} 2.2371609442247427 {l="NaN"} NaN -eval instant at 5m asin(trig - 10.1) +eval instant at 1m asin(trig - 10.1) {l="x"} -0.10016742116155944 {l="y"} NaN {l="NaN"} NaN -eval instant at 5m acos(trig - 10.1) +eval instant at 1m acos(trig - 10.1) {l="x"} 1.670963747956456 {l="y"} NaN {l="NaN"} NaN -eval instant at 5m atan(trig) +eval instant at 1m atan(trig) {l="x"} 1.4711276743037345 {l="y"} 1.5208379310729538 {l="NaN"} NaN -eval instant at 5m sinh(trig) +eval instant at 1m sinh(trig) {l="x"} 11013.232920103324 {l="y"} 2.4258259770489514e+08 {l="NaN"} NaN -eval instant at 5m cosh(trig) +eval instant at 1m cosh(trig) {l="x"} 11013.232920103324 {l="y"} 2.4258259770489514e+08 {l="NaN"} NaN -eval instant at 5m tanh(trig) +eval instant at 1m tanh(trig) {l="x"} 0.9999999958776927 {l="y"} 1 {l="NaN"} NaN -eval instant at 5m asinh(trig) +eval instant at 1m asinh(trig) {l="x"} 2.99822295029797 {l="y"} 3.6895038689889055 {l="NaN"} NaN -eval instant at 5m acosh(trig) +eval instant at 1m acosh(trig) {l="x"} 2.993222846126381 {l="y"} 3.6882538673612966 {l="NaN"} NaN -eval instant at 5m atanh(trig - 10.1) +eval instant at 1m atanh(trig - 10.1) {l="x"} -0.10033534773107522 {l="y"} NaN {l="NaN"} NaN -eval instant at 5m rad(trig) +eval instant at 1m rad(trig) {l="x"} 0.17453292519943295 {l="y"} 0.3490658503988659 {l="NaN"} NaN -eval instant at 5m rad(trig - 10) +eval instant at 1m rad(trig - 10) {l="x"} 0 {l="y"} 0.17453292519943295 {l="NaN"} NaN -eval instant at 5m rad(trig - 20) +eval instant at 1m rad(trig - 20) {l="x"} 
-0.17453292519943295 {l="y"} 0 {l="NaN"} NaN -eval instant at 5m deg(trig) +eval instant at 1m deg(trig) {l="x"} 572.9577951308232 {l="y"} 1145.9155902616465 {l="NaN"} NaN -eval instant at 5m deg(trig - 10) +eval instant at 1m deg(trig - 10) {l="x"} 0 {l="y"} 572.9577951308232 {l="NaN"} NaN -eval instant at 5m deg(trig - 20) +eval instant at 1m deg(trig - 20) {l="x"} -572.9577951308232 {l="y"} 0 {l="NaN"} NaN diff --git a/promql/quantile.go b/promql/quantile.go index 7ddb76acba7..06775d3ae67 100644 --- a/promql/quantile.go +++ b/promql/quantile.go @@ -153,19 +153,31 @@ func bucketQuantile(q float64, buckets buckets) (float64, bool, bool) { // histogramQuantile calculates the quantile 'q' based on the given histogram. // -// The quantile value is interpolated assuming a linear distribution within a -// bucket. -// TODO(beorn7): Find an interpolation method that is a better fit for -// exponential buckets (and think about configurable interpolation). +// For custom buckets, the result is interpolated linearly, i.e. it is assumed +// the observations are uniformly distributed within each bucket. (This is a +// quite blunt assumption, but it is consistent with the interpolation method +// used for classic histograms so far.) +// +// For exponential buckets, the interpolation is done under the assumption that +// the samples within each bucket are distributed in a way that they would +// uniformly populate the buckets in a hypothetical histogram with higher +// resolution. For example, if the rank calculation suggests that the requested +// quantile is right in the middle of the population of the (1,2] bucket, we +// assume the quantile would be right at the bucket boundary between the two +// buckets the (1,2] bucket would be divided into if the histogram had double +// the resolution, which is 2**2**-1 = 1.4142... We call this exponential +// interpolation. +// +// However, for a quantile that ends up in the zero bucket, this method isn't +// very helpful (because there is an infinite number of buckets close to zero, +// so we would have to assume zero as the result). Therefore, we return to +// linear interpolation in the zero bucket. // // A natural lower bound of 0 is assumed if the histogram has only positive // buckets. Likewise, a natural upper bound of 0 is assumed if the histogram has // only negative buckets. -// TODO(beorn7): Come to terms if we want that. // -// There are a number of special cases (once we have a way to report errors -// happening during evaluations of AST functions, we should report those -// explicitly): +// There are a number of special cases: // // If the histogram has 0 observations, NaN is returned. // @@ -193,9 +205,9 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { rank float64 ) - // if there are NaN observations in the histogram (h.Sum is NaN), use the forward iterator - // if the q < 0.5, use the forward iterator - // if the q >= 0.5, use the reverse iterator + // If there are NaN observations in the histogram (h.Sum is NaN), use the forward iterator. + // If q < 0.5, use the forward iterator. + // If q >= 0.5, use the reverse iterator. if math.IsNaN(h.Sum) || q < 0.5 { it = h.AllBucketIterator() rank = q * h.Count @@ -260,8 +272,29 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { rank = count - rank } - // TODO(codesome): Use a better estimation than linear. - return bucket.Lower + (bucket.Upper-bucket.Lower)*(rank/bucket.Count) + // The fraction of how far we are into the current bucket. 
+ fraction := rank / bucket.Count + + // Return linear interpolation for custom buckets and for quantiles that + // end up in the zero bucket. + if h.UsesCustomBuckets() || (bucket.Lower <= 0 && bucket.Upper >= 0) { + return bucket.Lower + (bucket.Upper-bucket.Lower)*fraction + } + + // For exponential buckets, we interpolate on a logarithmic scale. On a + // logarithmic scale, the exponential bucket boundaries (for any schema) + // become linear (every bucket has the same width). Therefore, after + // taking the logarithm of both bucket boundaries, we can use the + // calculated fraction in the same way as for linear interpolation (see + // above). Finally, we return to the normal scale by applying the + // exponential function to the result. + logLower := math.Log2(math.Abs(bucket.Lower)) + logUpper := math.Log2(math.Abs(bucket.Upper)) + if bucket.Lower > 0 { // Positive bucket. + return math.Exp2(logLower + (logUpper-logLower)*fraction) + } + // Otherwise, we are in a negative bucket and have to mirror things. + return -math.Exp2(logUpper + (logLower-logUpper)*(1-fraction)) } // histogramFraction calculates the fraction of observations between the @@ -271,8 +304,8 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { // histogramQuantile(0.9, h) returns 123.4, then histogramFraction(-Inf, 123.4, h) // returns 0.9. // -// The same notes (and TODOs) with regard to interpolation and assumptions about -// the zero bucket boundaries apply as for histogramQuantile. +// The same notes with regard to interpolation and assumptions about the zero +// bucket boundaries apply as for histogramQuantile. // // Whether either boundary is inclusive or exclusive doesn’t actually matter as // long as interpolation has to be performed anyway. In the case of a boundary @@ -310,7 +343,35 @@ func histogramFraction(lower, upper float64, h *histogram.FloatHistogram) float6 ) for it.Next() { b := it.At() - if b.Lower < 0 && b.Upper > 0 { + zeroBucket := false + + // interpolateLinearly is used for custom buckets to be + // consistent with the linear interpolation known from classic + // histograms. It is also used for the zero bucket. + interpolateLinearly := func(v float64) float64 { + return rank + b.Count*(v-b.Lower)/(b.Upper-b.Lower) + } + + // interpolateExponentially is using the same exponential + // interpolation method as above for histogramQuantile. This + // method is a better fit for exponential bucketing. + interpolateExponentially := func(v float64) float64 { + var ( + logLower = math.Log2(math.Abs(b.Lower)) + logUpper = math.Log2(math.Abs(b.Upper)) + logV = math.Log2(math.Abs(v)) + fraction float64 + ) + if v > 0 { + fraction = (logV - logLower) / (logUpper - logLower) + } else { + fraction = 1 - ((logV - logUpper) / (logLower - logUpper)) + } + return rank + b.Count*fraction + } + + if b.Lower <= 0 && b.Upper >= 0 { + zeroBucket = true switch { case len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0: // This is the zero bucket and the histogram has only @@ -325,10 +386,12 @@ func histogramFraction(lower, upper float64, h *histogram.FloatHistogram) float6 } } if !lowerSet && b.Lower >= lower { + // We have hit the lower value at the lower bucket boundary. lowerRank = rank lowerSet = true } if !upperSet && b.Lower >= upper { + // We have hit the upper value at the lower bucket boundary. 
upperRank = rank upperSet = true } @@ -336,11 +399,21 @@ func histogramFraction(lower, upper float64, h *histogram.FloatHistogram) float6 break } if !lowerSet && b.Lower < lower && b.Upper > lower { - lowerRank = rank + b.Count*(lower-b.Lower)/(b.Upper-b.Lower) + // The lower value is in this bucket. + if h.UsesCustomBuckets() || zeroBucket { + lowerRank = interpolateLinearly(lower) + } else { + lowerRank = interpolateExponentially(lower) + } lowerSet = true } if !upperSet && b.Lower < upper && b.Upper > upper { - upperRank = rank + b.Count*(upper-b.Lower)/(b.Upper-b.Lower) + // The upper value is in this bucket. + if h.UsesCustomBuckets() || zeroBucket { + upperRank = interpolateLinearly(upper) + } else { + upperRank = interpolateExponentially(upper) + } upperSet = true } if lowerSet && upperSet { diff --git a/promql/query_logger.go b/promql/query_logger.go index 7e06ebb97fe..c0a70b66d77 100644 --- a/promql/query_logger.go +++ b/promql/query_logger.go @@ -19,6 +19,7 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "path/filepath" "strings" @@ -26,14 +27,12 @@ import ( "unicode/utf8" "github.com/edsrzf/mmap-go" - "github.com/go-kit/log" - "github.com/go-kit/log/level" ) type ActiveQueryTracker struct { - mmapedFile []byte + mmappedFile []byte getNextIndex chan int - logger log.Logger + logger *slog.Logger closer io.Closer maxConcurrent int } @@ -63,11 +62,11 @@ func parseBrokenJSON(brokenJSON []byte) (string, bool) { return queries, true } -func logUnfinishedQueries(filename string, filesize int, logger log.Logger) { +func logUnfinishedQueries(filename string, filesize int, logger *slog.Logger) { if _, err := os.Stat(filename); err == nil { fd, err := os.Open(filename) if err != nil { - level.Error(logger).Log("msg", "Failed to open query log file", "err", err) + logger.Error("Failed to open query log file", "err", err) return } defer fd.Close() @@ -75,7 +74,7 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) { brokenJSON := make([]byte, filesize) _, err = fd.Read(brokenJSON) if err != nil { - level.Error(logger).Log("msg", "Failed to read query log file", "err", err) + logger.Error("Failed to read query log file", "err", err) return } @@ -83,72 +82,72 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) { if !queriesExist { return } - level.Info(logger).Log("msg", "These queries didn't finish in prometheus' last run:", "queries", queries) + logger.Info("These queries didn't finish in prometheus' last run:", "queries", queries) } } -type mmapedFile struct { +type mmappedFile struct { f io.Closer m mmap.MMap } -func (f *mmapedFile) Close() error { +func (f *mmappedFile) Close() error { err := f.m.Unmap() if err != nil { - err = fmt.Errorf("mmapedFile: unmapping: %w", err) + err = fmt.Errorf("mmappedFile: unmapping: %w", err) } if fErr := f.f.Close(); fErr != nil { - return errors.Join(fmt.Errorf("close mmapedFile.f: %w", fErr), err) + return errors.Join(fmt.Errorf("close mmappedFile.f: %w", fErr), err) } return err } -func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io.Closer, error) { +func getMMappedFile(filename string, filesize int, logger *slog.Logger) ([]byte, io.Closer, error) { file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666) if err != nil { absPath, pathErr := filepath.Abs(filename) if pathErr != nil { absPath = filename } - level.Error(logger).Log("msg", "Error opening query log file", "file", absPath, "err", err) + logger.Error("Error opening query log file", "file", 
absPath, "err", err) return nil, nil, err } err = file.Truncate(int64(filesize)) if err != nil { file.Close() - level.Error(logger).Log("msg", "Error setting filesize.", "filesize", filesize, "err", err) + logger.Error("Error setting filesize.", "filesize", filesize, "err", err) return nil, nil, err } fileAsBytes, err := mmap.Map(file, mmap.RDWR, 0) if err != nil { file.Close() - level.Error(logger).Log("msg", "Failed to mmap", "file", filename, "Attempted size", filesize, "err", err) + logger.Error("Failed to mmap", "file", filename, "Attempted size", filesize, "err", err) return nil, nil, err } - return fileAsBytes, &mmapedFile{f: file, m: fileAsBytes}, err + return fileAsBytes, &mmappedFile{f: file, m: fileAsBytes}, err } -func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker { +func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger *slog.Logger) *ActiveQueryTracker { err := os.MkdirAll(localStoragePath, 0o777) if err != nil { - level.Error(logger).Log("msg", "Failed to create directory for logging active queries") + logger.Error("Failed to create directory for logging active queries") } filename, filesize := filepath.Join(localStoragePath, "queries.active"), 1+maxConcurrent*entrySize logUnfinishedQueries(filename, filesize, logger) - fileAsBytes, closer, err := getMMapedFile(filename, filesize, logger) + fileAsBytes, closer, err := getMMappedFile(filename, filesize, logger) if err != nil { panic("Unable to create mmap-ed active query log") } copy(fileAsBytes, "[") activeQueryTracker := ActiveQueryTracker{ - mmapedFile: fileAsBytes, + mmappedFile: fileAsBytes, closer: closer, getNextIndex: make(chan int, maxConcurrent), logger: logger, @@ -174,18 +173,18 @@ func trimStringByBytes(str string, size int) string { return string(bytesStr[:trimIndex]) } -func _newJSONEntry(query string, timestamp int64, logger log.Logger) []byte { +func _newJSONEntry(query string, timestamp int64, logger *slog.Logger) []byte { entry := Entry{query, timestamp} jsonEntry, err := json.Marshal(entry) if err != nil { - level.Error(logger).Log("msg", "Cannot create json of query", "query", query) + logger.Error("Cannot create json of query", "query", query) return []byte{} } return jsonEntry } -func newJSONEntry(query string, logger log.Logger) []byte { +func newJSONEntry(query string, logger *slog.Logger) []byte { timestamp := time.Now().Unix() minEntryJSON := _newJSONEntry("", timestamp, logger) @@ -206,14 +205,14 @@ func (tracker ActiveQueryTracker) GetMaxConcurrent() int { } func (tracker ActiveQueryTracker) Delete(insertIndex int) { - copy(tracker.mmapedFile[insertIndex:], strings.Repeat("\x00", entrySize)) + copy(tracker.mmappedFile[insertIndex:], strings.Repeat("\x00", entrySize)) tracker.getNextIndex <- insertIndex } func (tracker ActiveQueryTracker) Insert(ctx context.Context, query string) (int, error) { select { case i := <-tracker.getNextIndex: - fileBytes := tracker.mmapedFile + fileBytes := tracker.mmappedFile entry := newJSONEntry(query, tracker.logger) start, end := i, i+entrySize diff --git a/promql/query_logger_test.go b/promql/query_logger_test.go index 7bd93781ec8..eb06e513efc 100644 --- a/promql/query_logger_test.go +++ b/promql/query_logger_test.go @@ -26,7 +26,7 @@ import ( func TestQueryLogging(t *testing.T) { fileAsBytes := make([]byte, 4096) queryLogger := ActiveQueryTracker{ - mmapedFile: fileAsBytes, + mmappedFile: fileAsBytes, logger: nil, getNextIndex: make(chan int, 4), } @@ -70,7 +70,7 @@ func 
TestQueryLogging(t *testing.T) { func TestIndexReuse(t *testing.T) { queryBytes := make([]byte, 1+3*entrySize) queryLogger := ActiveQueryTracker{ - mmapedFile: queryBytes, + mmappedFile: queryBytes, logger: nil, getNextIndex: make(chan int, 3), } @@ -106,10 +106,10 @@ func TestIndexReuse(t *testing.T) { func TestMMapFile(t *testing.T) { dir := t.TempDir() - fpath := filepath.Join(dir, "mmapedFile") + fpath := filepath.Join(dir, "mmappedFile") const data = "ab" - fileAsBytes, closer, err := getMMapedFile(fpath, 2, nil) + fileAsBytes, closer, err := getMMappedFile(fpath, 2, nil) require.NoError(t, err) copy(fileAsBytes, data) require.NoError(t, closer.Close()) diff --git a/promql/value.go b/promql/value.go index f25dbcd7809..f19c0b5b582 100644 --- a/promql/value.go +++ b/promql/value.go @@ -526,7 +526,7 @@ func (ssi *storageSeriesIterator) Next() chunkenc.ValueType { ssi.currH = p.H return chunkenc.ValFloatHistogram default: - panic("storageSeriesIterater.Next failed to pick value type") + panic("storageSeriesIterator.Next failed to pick value type") } } diff --git a/rh-manifest.txt b/rh-manifest.txt index 20c09cf60ef..816637bbf73 100644 --- a/rh-manifest.txt +++ b/rh-manifest.txt @@ -1,11 +1,25 @@ -@babel/runtime@7.24.7 +@adobe/css-tools@4.4.0 +@babel/code-frame@7.24.7 +@babel/helper-validator-identifier@7.24.7 +@babel/highlight@7.24.7 +@babel/runtime@7.24.8 +@babel/runtime@7.25.6 @codemirror/autocomplete@6.17.0 +@codemirror/autocomplete@6.18.1 @codemirror/commands@6.6.0 +@codemirror/commands@6.6.1 @codemirror/language@6.10.2 @codemirror/lint@6.8.1 @codemirror/search@6.5.6 @codemirror/state@6.4.1 -@codemirror/view@6.29.1 +@codemirror/theme-one-dark@6.1.2 +@codemirror/view@6.33.0 +@codemirror/view@6.34.1 +@floating-ui/core@1.6.7 +@floating-ui/dom@1.6.10 +@floating-ui/react-dom@2.1.1 +@floating-ui/react@0.26.23 +@floating-ui/utils@0.2.7 @forevolve/bootstrap-dark@4.0.2 @fortawesome/fontawesome-common-types@6.5.2 @fortawesome/fontawesome-svg-core@6.5.2 @@ -14,56 +28,122 @@ @hypnosphi/create-react-context@0.3.1 @lezer/common@1.2.1 @lezer/highlight@1.2.0 +@lezer/highlight@1.2.1 @lezer/lr@1.4.2 +@mantine/code-highlight@7.13.1 +@mantine/core@7.13.1 +@mantine/dates@7.13.1 +@mantine/hooks@7.13.1 +@mantine/notifications@7.13.1 +@mantine/store@7.13.1 +@microsoft/fetch-event-source@2.0.1 @nexucis/fuzzy@0.4.1 +@nexucis/fuzzy@0.5.1 @nexucis/kvsearch@0.8.1 -@prometheus-io/app@0.55.1 +@nexucis/kvsearch@0.9.1 +@prometheus-io/codemirror-promql@0.300.1 @prometheus-io/codemirror-promql@0.55.1 +@prometheus-io/lezer-promql@0.300.1 @prometheus-io/lezer-promql@0.55.1 +@prometheus-io/mantine-ui@0.300.1 +@reduxjs/toolkit@2.2.7 +@remix-run/router@1.19.2 +@tabler/icons-react@3.19.0 +@tabler/icons@3.19.0 +@tanstack/query-core@5.59.0 +@tanstack/react-query@5.59.0 +@testing-library/dom@10.4.0 +@testing-library/jest-dom@6.5.0 +@testing-library/react@16.0.1 +@types/aria-query@5.0.4 @types/http-proxy@1.17.14 -@types/node@20.14.9 -anymatch@3.1.2 -binary-extensions@2.2.0 +@types/lodash@4.17.9 +@types/node@20.14.13 +@types/prop-types@15.7.12 +@types/react-dom@18.3.0 +@types/react@18.3.5 +@types/sanitize-html@2.13.0 +@types/use-sync-external-store@0.0.3 +@uiw/codemirror-extensions-basic-setup@4.23.3 +@uiw/react-codemirror@4.23.3 +ansi-regex@5.0.1 +ansi-styles@3.2.1 +ansi-styles@4.3.0 +ansi-styles@5.2.0 +anymatch@3.1.3 +aria-query@5.3.0 +binary-extensions@2.3.0 bootstrap@4.6.2 -braces@3.0.3 -call-bind@1.0.2 -chokidar@3.5.3 -classnames@2.3.2 +braces@3.0.2 +call-bind@1.0.7 +chalk@2.4.2 +chalk@3.0.0 +chalk@4.1.2 +chokidar@3.6.0 
+classnames@2.5.1 +clsx@2.1.1 +codemirror@6.0.1 +color-convert@1.9.3 +color-convert@2.0.1 +color-name@1.1.3 +color-name@1.1.4 +colors@0.5.1 compute-scroll-into-view@3.1.0 -copy-to-clipboard@3.3.2 -crelt@1.0.5 +copy-to-clipboard@3.3.3 +crelt@1.0.6 css.escape@1.5.1 +csstype@3.1.3 +dayjs@1.11.13 debug@4.3.4 -deep-equal@1.1.1 -deepmerge@4.2.2 -define-properties@1.1.4 +deep-equal@1.1.2 +deepmerge@4.3.1 +define-data-property@1.1.4 +define-properties@1.2.1 +dequal@2.0.3 +detect-node-es@1.1.0 +discontinuous-range@1.0.0 +dom-accessibility-api@0.5.16 +dom-accessibility-api@0.6.3 dom-helpers@3.4.0 +dom-helpers@5.2.1 dom-serializer@2.0.0 domelementtype@2.3.0 domhandler@5.0.3 -domutils@3.0.1 +domutils@3.1.0 downshift@9.0.6 -entities@4.4.0 +entities@4.5.0 +es-define-property@1.0.0 +es-errors@1.3.0 +escape-string-regexp@1.0.5 escape-string-regexp@4.0.0 eventemitter3@4.0.7 -fill-range@7.1.1 +fill-range@7.0.1 follow-redirects@1.15.6 fsevents@2.3.3 -function-bind@1.1.1 +function-bind@1.1.2 functions-have-names@1.2.3 -get-intrinsic@1.1.3 +get-intrinsic@1.2.4 +get-nonce@1.0.1 glob-parent@5.1.2 +gopd@1.0.1 gud@1.0.0 -has-property-descriptors@1.0.0 +has-flag@3.0.0 +has-flag@4.0.0 +has-property-descriptors@1.0.2 +has-proto@1.0.3 has-symbols@1.0.3 -has-tostringtag@1.0.0 -has@1.0.3 +has-tostringtag@1.0.2 +hasown@2.0.2 +highlight.js@11.10.0 history@4.10.1 hoist-non-react-statics@3.3.2 -htmlparser2@8.0.1 +htmlparser2@8.0.2 http-proxy-middleware@3.0.0 http-proxy@1.18.1 -immutable@4.1.0 +immer@10.1.1 +immutable@4.3.5 +indent-string@4.0.0 +invariant@2.2.4 is-arguments@1.1.1 is-binary-path@2.1.0 is-date-object@1.0.5 @@ -77,62 +157,110 @@ isarray@0.0.1 jquery.flot.tooltip@0.9.0 jquery@3.7.1 js-tokens@4.0.0 +lodash.flattendeep@4.4.0 lodash@4.17.21 loose-envify@1.4.0 +lru-cache@11.0.1 lru-cache@7.18.3 +lz-string@1.5.0 micromatch@4.0.5 +min-indent@1.0.1 moment-timezone@0.5.45 moment@2.29.4 moment@2.30.1 ms@2.1.2 -nanoid@3.3.4 +nanoid@3.3.7 +nearley@2.7.10 +nomnom@1.6.2 normalize-path@3.0.0 object-assign@4.1.1 -object-is@1.1.5 +object-is@1.1.6 object-keys@1.1.1 parse-srcset@1.0.2 path-to-regexp@1.8.0 picocolors@1.0.0 +picocolors@1.1.0 picomatch@2.3.1 popper.js@1.16.1 -postcss@8.4.17 +postcss@8.4.38 +postcss@8.4.47 +pretty-format@27.5.1 prop-types@15.8.1 +railroad-diagrams@1.0.0 +randexp@0.4.6 react-copy-to-clipboard@5.1.0 react-dom@17.0.2 +react-dom@18.3.1 react-infinite-scroll-component@6.1.0 react-is@16.13.1 react-is@17.0.2 react-is@18.2.0 react-lifecycles-compat@3.0.4 +react-number-format@5.4.1 react-popper@1.3.11 +react-redux@9.1.2 +react-remove-scroll-bar@2.3.6 +react-remove-scroll@2.5.10 react-resize-detector@7.1.2 react-router-dom@5.3.4 +react-router-dom@6.26.2 react-router@5.3.4 +react-router@6.26.2 react-shallow-renderer@16.15.0 +react-style-singleton@2.2.1 react-test-renderer@17.0.2 +react-textarea-autosize@8.5.3 react-transition-group@3.0.0 +react-transition-group@4.4.5 react@17.0.2 +react@18.3.1 reactstrap@8.10.1 readdirp@3.6.0 +redent@3.0.0 +redux-thunk@3.1.0 +redux@5.0.1 regenerator-runtime@0.14.1 -regexp.prototype.flags@1.4.3 +regexp.prototype.flags@1.5.2 requires-port@1.0.0 +reselect@5.1.1 resolve-pathname@3.0.0 +ret@0.1.15 sanitize-html@2.13.0 sass@1.77.6 scheduler@0.20.2 -source-map-js@1.0.2 -style-mod@4.1.0 +scheduler@0.23.2 +serialize-query-params@2.0.2 +set-function-length@1.2.2 +set-function-name@2.0.2 +source-map-js@1.2.0 +source-map-js@1.2.1 +strip-indent@3.0.0 +style-mod@4.1.2 +supports-color@5.5.0 +supports-color@7.2.0 +tabbable@6.2.0 tempusdominus-bootstrap-4@5.39.2 tempusdominus-core@5.19.3 
throttle-debounce@2.3.0 -tiny-invariant@1.3.1 +tiny-invariant@1.3.3 tiny-warning@1.0.3 to-regex-range@5.0.1 toggle-selection@1.0.6 -tslib@2.6.3 +tslib@2.6.2 +tslib@2.7.0 +type-fest@4.26.0 typed-styles@0.0.7 +underscore@1.4.4 undici-types@5.26.5 +uplot-react@1.2.2 +uplot@1.6.30 +use-callback-ref@1.3.2 +use-composed-ref@1.3.0 +use-isomorphic-layout-effect@1.1.2 +use-latest@1.2.1 +use-query-params@2.2.1 +use-sidecar@1.1.2 +use-sync-external-store@1.2.2 value-equal@1.0.1 -w3c-keyname@2.2.6 +w3c-keyname@2.2.8 warning@4.0.3 diff --git a/rules/alerting.go b/rules/alerting.go index 2dc0917dceb..7e74c176aa5 100644 --- a/rules/alerting.go +++ b/rules/alerting.go @@ -16,13 +16,12 @@ package rules import ( "context" "fmt" + "log/slog" "net/url" "strings" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" "go.uber.org/atomic" "gopkg.in/yaml.v2" @@ -141,7 +140,7 @@ type AlertingRule struct { // the fingerprint of the labelset they correspond to. active map[uint64]*Alert - logger log.Logger + logger *slog.Logger noDependentRules *atomic.Bool noDependencyRules *atomic.Bool @@ -151,7 +150,7 @@ type AlertingRule struct { func NewAlertingRule( name string, vec parser.Expr, hold, keepFiringFor time.Duration, labels, annotations, externalLabels labels.Labels, externalURL string, - restored bool, logger log.Logger, + restored bool, logger *slog.Logger, ) *AlertingRule { el := externalLabels.Map() @@ -381,7 +380,7 @@ func (r *AlertingRule) Eval(ctx context.Context, queryOffset time.Duration, ts t result, err := tmpl.Expand() if err != nil { result = fmt.Sprintf("", err) - level.Warn(r.logger).Log("msg", "Expanding alert template failed", "err", err, "data", tmplData) + r.logger.Warn("Expanding alert template failed", "err", err, "data", tmplData) } return result } diff --git a/rules/alerting_test.go b/rules/alerting_test.go index 67d683c851f..f0aa339cc7e 100644 --- a/rules/alerting_test.go +++ b/rules/alerting_test.go @@ -19,8 +19,8 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/histogram" @@ -276,7 +276,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) { labels.EmptyLabels(), labels.EmptyLabels(), "", - true, log.NewNopLogger(), + true, promslog.NewNopLogger(), ) ruleWithExternalLabels := NewAlertingRule( "ExternalLabelExists", @@ -287,7 +287,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) { labels.EmptyLabels(), labels.FromStrings("foo", "bar", "dings", "bums"), "", - true, log.NewNopLogger(), + true, promslog.NewNopLogger(), ) result := promql.Vector{ promql.Sample{ @@ -371,7 +371,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) { labels.EmptyLabels(), labels.EmptyLabels(), "", - true, log.NewNopLogger(), + true, promslog.NewNopLogger(), ) ruleWithExternalURL := NewAlertingRule( "ExternalURLExists", @@ -382,7 +382,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) { labels.EmptyLabels(), labels.EmptyLabels(), "http://localhost:1234", - true, log.NewNopLogger(), + true, promslog.NewNopLogger(), ) result := promql.Vector{ promql.Sample{ @@ -466,7 +466,7 @@ func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) { labels.EmptyLabels(), labels.EmptyLabels(), "", - true, log.NewNopLogger(), + true, promslog.NewNopLogger(), ) result := promql.Vector{ promql.Sample{ @@ -527,7 +527,7 @@ instance: {{ $v.Labels.instance }}, value: {{ printf 
"%.0f" $v.Value }}; `), labels.EmptyLabels(), "", - true, log.NewNopLogger(), + true, promslog.NewNopLogger(), ) evalTime := time.Unix(0, 0) @@ -607,7 +607,7 @@ func TestAlertingRuleDuplicate(t *testing.T) { labels.EmptyLabels(), labels.EmptyLabels(), "", - true, log.NewNopLogger(), + true, promslog.NewNopLogger(), ) _, err := rule.Eval(ctx, 0, now, EngineQueryFunc(engine, storage), nil, 0) require.Error(t, err) @@ -651,7 +651,7 @@ func TestAlertingRuleLimit(t *testing.T) { labels.EmptyLabels(), labels.EmptyLabels(), "", - true, log.NewNopLogger(), + true, promslog.NewNopLogger(), ) evalTime := time.Unix(0, 0) @@ -779,7 +779,7 @@ func TestSendAlertsDontAffectActiveAlerts(t *testing.T) { }, }, } - nm := notifier.NewManager(&opts, log.NewNopLogger()) + nm := notifier.NewManager(&opts, promslog.NewNopLogger()) f := SendAlerts(nm, "") notifyFunc := func(ctx context.Context, expr string, alerts ...*Alert) { @@ -986,7 +986,7 @@ func TestAlertingEvalWithOrigin(t *testing.T) { labels.EmptyLabels(), labels.EmptyLabels(), "", - true, log.NewNopLogger(), + true, promslog.NewNopLogger(), ) _, err = rule.Eval(ctx, 0, now, func(ctx context.Context, qs string, _ time.Time) (promql.Vector, error) { @@ -1008,7 +1008,7 @@ func TestAlertingRule_SetNoDependentRules(t *testing.T) { labels.EmptyLabels(), labels.EmptyLabels(), "", - true, log.NewNopLogger(), + true, promslog.NewNopLogger(), ) require.False(t, rule.NoDependentRules()) @@ -1029,7 +1029,7 @@ func TestAlertingRule_SetNoDependencyRules(t *testing.T) { labels.EmptyLabels(), labels.EmptyLabels(), "", - true, log.NewNopLogger(), + true, promslog.NewNopLogger(), ) require.False(t, rule.NoDependencyRules()) diff --git a/rules/fixtures/rules1.yaml b/rules/fixtures/rules1.yaml new file mode 100644 index 00000000000..76fbf71f3b1 --- /dev/null +++ b/rules/fixtures/rules1.yaml @@ -0,0 +1,5 @@ +groups: + - name: test_1 + rules: + - record: test_2 + expr: vector(2) diff --git a/rules/group.go b/rules/group.go index 201d3a67d75..7dd046b57ae 100644 --- a/rules/group.go +++ b/rules/group.go @@ -16,6 +16,7 @@ package rules import ( "context" "errors" + "log/slog" "math" "slices" "strings" @@ -26,10 +27,9 @@ import ( "github.com/prometheus/prometheus/promql/parser" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" @@ -65,7 +65,7 @@ type Group struct { terminated chan struct{} managerDone chan struct{} - logger log.Logger + logger *slog.Logger metrics *Metrics @@ -75,6 +75,7 @@ type Group struct { // concurrencyController controls the rules evaluation concurrency. 
concurrencyController RuleConcurrencyController + appOpts *storage.AppendOptions } // GroupEvalIterationFunc is used to implement and extend rule group @@ -124,6 +125,10 @@ func NewGroup(o GroupOptions) *Group { concurrencyController = sequentialRuleEvalController{} } + if o.Opts.Logger == nil { + o.Opts.Logger = promslog.NewNopLogger() + } + return &Group{ name: o.Name, file: o.File, @@ -137,10 +142,11 @@ func NewGroup(o GroupOptions) *Group { done: make(chan struct{}), managerDone: o.done, terminated: make(chan struct{}), - logger: log.With(o.Opts.Logger, "file", o.File, "group", o.Name), + logger: o.Opts.Logger.With("file", o.File, "group", o.Name), metrics: metrics, evalIterationFunc: evalIterationFunc, concurrencyController: concurrencyController, + appOpts: &storage.AppendOptions{DiscardOutOfOrder: true}, } } @@ -188,7 +194,7 @@ func matchesMatcherSets(matcherSets [][]*labels.Matcher, lbls labels.Labels) boo return ok } -// Queryable returns the group's querable. +// Queryable returns the group's queryable. func (g *Group) Queryable() storage.Queryable { return g.opts.Queryable } // Context returns the group's context. @@ -200,7 +206,7 @@ func (g *Group) Interval() time.Duration { return g.interval } // Limit returns the group's limit. func (g *Group) Limit() int { return g.limit } -func (g *Group) Logger() log.Logger { return g.logger } +func (g *Group) Logger() *slog.Logger { return g.logger } func (g *Group) run(ctx context.Context) { defer close(g.terminated) @@ -272,7 +278,7 @@ func (g *Group) run(ctx context.Context) { g.RestoreForState(restoreStartTime) totalRestoreTimeSeconds := time.Since(restoreStartTime).Seconds() g.metrics.GroupLastRestoreDuration.WithLabelValues(GroupKey(g.file, g.name)).Set(totalRestoreTimeSeconds) - level.Debug(g.logger).Log("msg", "'for' state restoration completed", "duration_seconds", totalRestoreTimeSeconds) + g.logger.Debug("'for' state restoration completed", "duration_seconds", totalRestoreTimeSeconds) g.shouldRestore = false } @@ -495,7 +501,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { defer cleanup() } - logger := log.WithPrefix(g.logger, "name", rule.Name(), "index", i) + logger := g.logger.With("name", rule.Name(), "index", i) ctx, sp := otel.Tracer("").Start(ctx, "rule") sp.SetAttributes(attribute.String("name", rule.Name())) defer func(t time.Time) { @@ -508,7 +514,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { }(time.Now()) if sp.SpanContext().IsSampled() && sp.SpanContext().HasTraceID() { - logger = log.WithPrefix(logger, "trace_id", sp.SpanContext().TraceID()) + logger = logger.With("trace_id", sp.SpanContext().TraceID()) } g.metrics.EvalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() @@ -524,7 +530,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { // happens on shutdown and thus we skip logging of any errors here.
var eqc promql.ErrQueryCanceled if !errors.As(err, &eqc) { - level.Warn(logger).Log("msg", "Evaluating rule failed", "rule", rule, "err", err) + logger.Warn("Evaluating rule failed", "rule", rule, "err", err) } return } @@ -550,7 +556,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { sp.SetStatus(codes.Error, err.Error()) g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() - level.Warn(logger).Log("msg", "Rule sample appending failed", "err", err) + logger.Warn("Rule sample appending failed", "err", err) return } g.seriesInPreviousEval[i] = seriesReturned @@ -560,6 +566,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { if s.H != nil { _, err = app.AppendHistogram(0, s.Metric, s.T, nil, s.H) } else { + app.SetOptions(g.appOpts) _, err = app.Append(0, s.Metric, s.T, s.F) } @@ -574,15 +581,15 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { switch { case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample): numOutOfOrder++ - level.Debug(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) + logger.Debug("Rule evaluation result discarded", "err", err, "sample", s) case errors.Is(unwrappedErr, storage.ErrTooOldSample): numTooOld++ - level.Debug(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) + logger.Debug("Rule evaluation result discarded", "err", err, "sample", s) case errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp): numDuplicates++ - level.Debug(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) + logger.Debug("Rule evaluation result discarded", "err", err, "sample", s) default: - level.Warn(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) + logger.Warn("Rule evaluation result discarded", "err", err, "sample", s) } } else { buf := [1024]byte{} @@ -590,13 +597,13 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { } } if numOutOfOrder > 0 { - level.Warn(logger).Log("msg", "Error on ingesting out-of-order result from rule evaluation", "num_dropped", numOutOfOrder) + logger.Warn("Error on ingesting out-of-order result from rule evaluation", "num_dropped", numOutOfOrder) } if numTooOld > 0 { - level.Warn(logger).Log("msg", "Error on ingesting too old result from rule evaluation", "num_dropped", numTooOld) + logger.Warn("Error on ingesting too old result from rule evaluation", "num_dropped", numTooOld) } if numDuplicates > 0 { - level.Warn(logger).Log("msg", "Error on ingesting results from rule evaluation with different value but same timestamp", "num_dropped", numDuplicates) + logger.Warn("Error on ingesting results from rule evaluation with different value but same timestamp", "num_dropped", numDuplicates) } for metric, lset := range g.seriesInPreviousEval[i] { @@ -615,7 +622,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { // Do not count these in logging, as this is expected if series // is exposed from a different rule. default: - level.Warn(logger).Log("msg", "Adding stale sample failed", "sample", lset.String(), "err", err) + logger.Warn("Adding stale sample failed", "sample", lset.String(), "err", err) } } } @@ -656,6 +663,7 @@ func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) { return } app := g.opts.Appendable.Appender(ctx) + app.SetOptions(g.appOpts) queryOffset := g.QueryOffset() for _, s := range g.staleSeries { // Rule that produced series no longer configured, mark it stale. 
@@ -672,11 +680,11 @@ func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) { // Do not count these in logging, as this is expected if series // is exposed from a different rule. default: - level.Warn(g.logger).Log("msg", "Adding stale sample for previous configuration failed", "sample", s, "err", err) + g.logger.Warn("Adding stale sample for previous configuration failed", "sample", s, "err", err) } } if err := app.Commit(); err != nil { - level.Warn(g.logger).Log("msg", "Stale sample appending for previous configuration failed", "err", err) + g.logger.Warn("Stale sample appending for previous configuration failed", "err", err) } else { g.staleSeries = nil } @@ -691,12 +699,12 @@ func (g *Group) RestoreForState(ts time.Time) { mintMS := int64(model.TimeFromUnixNano(mint.UnixNano())) q, err := g.opts.Queryable.Querier(mintMS, maxtMS) if err != nil { - level.Error(g.logger).Log("msg", "Failed to get Querier", "err", err) + g.logger.Error("Failed to get Querier", "err", err) return } defer func() { if err := q.Close(); err != nil { - level.Error(g.logger).Log("msg", "Failed to close Querier", "err", err) + g.logger.Error("Failed to close Querier", "err", err) } }() @@ -717,8 +725,8 @@ func (g *Group) RestoreForState(ts time.Time) { sset, err := alertRule.QueryForStateSeries(g.opts.Context, q) if err != nil { - level.Error(g.logger).Log( - "msg", "Failed to restore 'for' state", + g.logger.Error( + "Failed to restore 'for' state", labels.AlertName, alertRule.Name(), "stage", "Select", "err", err, @@ -737,7 +745,7 @@ func (g *Group) RestoreForState(ts time.Time) { // No results for this alert rule. if len(seriesByLabels) == 0 { - level.Debug(g.logger).Log("msg", "No series found to restore the 'for' state of the alert rule", labels.AlertName, alertRule.Name()) + g.logger.Debug("No series found to restore the 'for' state of the alert rule", labels.AlertName, alertRule.Name()) alertRule.SetRestored(true) continue } @@ -757,7 +765,7 @@ func (g *Group) RestoreForState(ts time.Time) { t, v = it.At() } if it.Err() != nil { - level.Error(g.logger).Log("msg", "Failed to restore 'for' state", + g.logger.Error("Failed to restore 'for' state", labels.AlertName, alertRule.Name(), "stage", "Iterator", "err", it.Err()) return } @@ -799,7 +807,7 @@ func (g *Group) RestoreForState(ts time.Time) { } a.ActiveAt = restoredActiveAt - level.Debug(g.logger).Log("msg", "'for' state restored", + g.logger.Debug("'for' state restored", labels.AlertName, alertRule.Name(), "restored_time", a.ActiveAt.Format(time.RFC850), "labels", a.Labels.String()) }) diff --git a/rules/manager.go b/rules/manager.go index 9e5b33fbc90..6e9bf64691d 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -17,15 +17,15 @@ import ( "context" "errors" "fmt" + "log/slog" "net/url" "slices" "strings" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "golang.org/x/sync/semaphore" "github.com/prometheus/prometheus/model/labels" @@ -96,7 +96,7 @@ type Manager struct { done chan struct{} restored bool - logger log.Logger + logger *slog.Logger } // NotifyFunc sends notifications about a set of alerts generated by the given expression. 
@@ -110,7 +110,7 @@ type ManagerOptions struct { Context context.Context Appendable storage.Appendable Queryable storage.Queryable - Logger log.Logger + Logger *slog.Logger Registerer prometheus.Registerer OutageTolerance time.Duration ForGracePeriod time.Duration @@ -148,6 +148,10 @@ func NewManager(o *ManagerOptions) *Manager { o.RuleDependencyController = ruleDependencyController{} } + if o.Logger == nil { + o.Logger = promslog.NewNopLogger() + } + m := &Manager{ groups: map[string]*Group{}, opts: o, @@ -161,7 +165,7 @@ func NewManager(o *ManagerOptions) *Manager { // Run starts processing of the rule manager. It is blocking. func (m *Manager) Run() { - level.Info(m.logger).Log("msg", "Starting rule manager...") + m.logger.Info("Starting rule manager...") m.start() <-m.done } @@ -175,7 +179,7 @@ func (m *Manager) Stop() { m.mtx.Lock() defer m.mtx.Unlock() - level.Info(m.logger).Log("msg", "Stopping rule manager...") + m.logger.Info("Stopping rule manager...") for _, eg := range m.groups { eg.stop() @@ -185,7 +189,7 @@ func (m *Manager) Stop() { // staleness markers. close(m.done) - level.Info(m.logger).Log("msg", "Rule manager stopped") + m.logger.Info("Rule manager stopped") } // Update the rule manager's state as the config requires. If @@ -206,7 +210,7 @@ func (m *Manager) Update(interval time.Duration, files []string, externalLabels if errs != nil { for _, e := range errs { - level.Error(m.logger).Log("msg", "loading groups failed", "err", e) + m.logger.Error("loading groups failed", "err", e) } return errors.New("error loading rules, previous rule set restored") } @@ -312,25 +316,27 @@ func (m *Manager) LoadGroups( return nil, []error{fmt.Errorf("%s: %w", fn, err)} } + mLabels := FromMaps(rg.Labels, r.Labels) + if r.Alert.Value != "" { rules = append(rules, NewAlertingRule( r.Alert.Value, expr, time.Duration(r.For), time.Duration(r.KeepFiringFor), - labels.FromMap(r.Labels), + mLabels, labels.FromMap(r.Annotations), externalLabels, externalURL, m.restored, - log.With(m.logger, "alert", r.Alert), + m.logger.With("alert", r.Alert), )) continue } rules = append(rules, NewRecordingRule( r.Record.Value, expr, - labels.FromMap(r.Labels), + mLabels, )) } @@ -501,3 +507,16 @@ func (c sequentialRuleEvalController) Allow(_ context.Context, _ *Group, _ Rule) } func (c sequentialRuleEvalController) Done(_ context.Context) {} + +// FromMaps returns new sorted Labels from the given maps, overriding each other in order. 
+func FromMaps(maps ...map[string]string) labels.Labels { + mLables := make(map[string]string) + + for _, m := range maps { + for k, v := range m { + mLables[k] = v + } + } + + return labels.FromMap(mLables) +} diff --git a/rules/manager_test.go b/rules/manager_test.go index b9f6db3273e..6afac993d83 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -26,10 +26,10 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/atomic" "gopkg.in/yaml.v2" @@ -374,7 +374,7 @@ func TestForStateRestore(t *testing.T) { Appendable: storage, Queryable: storage, Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), NotifyFunc: func(ctx context.Context, expr string, alerts ...*Alert) {}, OutageTolerance: 30 * time.Minute, ForGracePeriod: 10 * time.Minute, @@ -547,7 +547,7 @@ func TestStaleness(t *testing.T) { Appendable: st, Queryable: st, Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), } expr, err := parser.ParseExpr("a + 1") @@ -641,7 +641,7 @@ groups: require.NoError(t, err) m := NewManager(&ManagerOptions{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), DefaultRuleQueryOffset: func() time.Duration { return time.Minute }, @@ -781,7 +781,7 @@ func TestUpdate(t *testing.T) { Queryable: st, QueryFunc: EngineQueryFunc(engine, st), Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), }) ruleManager.start() defer ruleManager.Stop() @@ -855,10 +855,11 @@ type ruleGroupsTest struct { // ruleGroupTest forms a testing struct for running tests over rules. 
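FromMaps flattens its arguments left to right, so keys from later maps win; LoadGroups calls it as FromMaps(rg.Labels, r.Labels), meaning per-rule labels override group-level labels before the merged set is turned into a sorted labels.Labels value. A small usage sketch (assuming the prometheus/prometheus module is on the import path; merge re-states the helper locally rather than importing the rules package):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

// merge mirrors the FromMaps helper introduced above: later maps win on conflict.
func merge(maps ...map[string]string) labels.Labels {
	merged := make(map[string]string)
	for _, m := range maps {
		for k, v := range m {
			merged[k] = v
		}
	}
	return labels.FromMap(merged)
}

func main() {
	groupLabels := map[string]string{"team": "sre", "severity": "warning"}
	ruleLabels := map[string]string{"severity": "critical"} // the per-rule label overrides the group label

	fmt.Println(merge(groupLabels, ruleLabels))
	// {severity="critical", team="sre"}
}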
type ruleGroupTest struct { - Name string `yaml:"name"` - Interval model.Duration `yaml:"interval,omitempty"` - Limit int `yaml:"limit,omitempty"` - Rules []rulefmt.Rule `yaml:"rules"` + Name string `yaml:"name"` + Interval model.Duration `yaml:"interval,omitempty"` + Limit int `yaml:"limit,omitempty"` + Rules []rulefmt.Rule `yaml:"rules"` + Labels map[string]string `yaml:"labels,omitempty"` } func formatRules(r *rulefmt.RuleGroups) ruleGroupsTest { @@ -881,6 +882,7 @@ func formatRules(r *rulefmt.RuleGroups) ruleGroupsTest { Interval: g.Interval, Limit: g.Limit, Rules: rtmp, + Labels: g.Labels, }) } return ruleGroupsTest{ @@ -923,14 +925,14 @@ func TestNotify(t *testing.T) { Appendable: storage, Queryable: storage, Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), NotifyFunc: notifyFunc, ResendDelay: 2 * time.Second, } expr, err := parser.ParseExpr("a > 1") require.NoError(t, err) - rule := NewAlertingRule("aTooHigh", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, log.NewNopLogger()) + rule := NewAlertingRule("aTooHigh", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, promslog.NewNopLogger()) group := NewGroup(GroupOptions{ Name: "alert", Interval: time.Second, @@ -994,7 +996,7 @@ func TestMetricsUpdate(t *testing.T) { Queryable: storage, QueryFunc: EngineQueryFunc(engine, storage), Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Registerer: registry, }) ruleManager.start() @@ -1068,7 +1070,7 @@ func TestGroupStalenessOnRemoval(t *testing.T) { Queryable: storage, QueryFunc: EngineQueryFunc(engine, storage), Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), }) var stopped bool ruleManager.start() @@ -1145,7 +1147,7 @@ func TestMetricsStalenessOnManagerShutdown(t *testing.T) { Queryable: storage, QueryFunc: EngineQueryFunc(engine, storage), Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), }) var stopped bool ruleManager.start() @@ -1193,6 +1195,53 @@ func countStaleNaN(t *testing.T, st storage.Storage) int { return c } +func TestRuleMovedBetweenGroups(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + storage := teststorage.New(t, 600000) + defer storage.Close() + opts := promql.EngineOpts{ + Logger: nil, + Reg: nil, + MaxSamples: 10, + Timeout: 10 * time.Second, + } + engine := promql.NewEngine(opts) + ruleManager := NewManager(&ManagerOptions{ + Appendable: storage, + Queryable: storage, + QueryFunc: EngineQueryFunc(engine, storage), + Context: context.Background(), + Logger: promslog.NewNopLogger(), + }) + var stopped bool + ruleManager.start() + defer func() { + if !stopped { + ruleManager.Stop() + } + }() + + rule2 := "fixtures/rules2.yaml" + rule1 := "fixtures/rules1.yaml" + + // Load initial configuration of rules2 + require.NoError(t, ruleManager.Update(1*time.Second, []string{rule2}, labels.EmptyLabels(), "", nil)) + + // Wait for rule to be evaluated + time.Sleep(3 * time.Second) + + // Reload configuration of rules1 + require.NoError(t, ruleManager.Update(1*time.Second, []string{rule1}, labels.EmptyLabels(), "", nil)) + + // Wait for rule to be evaluated in new location and potential staleness marker + time.Sleep(3 * time.Second) + + require.Equal(t, 0, countStaleNaN(t, storage)) // Not expecting any stale markers. 
+} + func TestGroupHasAlertingRules(t *testing.T) { tests := []struct { group *Group @@ -1247,7 +1296,7 @@ func TestRuleHealthUpdates(t *testing.T) { Appendable: st, Queryable: st, Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), } expr, err := parser.ParseExpr("a + 1") @@ -1345,7 +1394,7 @@ func TestRuleGroupEvalIterationFunc(t *testing.T) { Appendable: storage, Queryable: storage, Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), NotifyFunc: func(ctx context.Context, expr string, alerts ...*Alert) {}, OutageTolerance: 30 * time.Minute, ForGracePeriod: 10 * time.Minute, @@ -1431,7 +1480,7 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) { Appendable: storage, Queryable: storage, Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), } expr, err := parser.ParseExpr("sum(histogram_metric)") @@ -1479,7 +1528,7 @@ func TestManager_LoadGroups_ShouldCheckWhetherEachRuleHasDependentsAndDependenci ruleManager := NewManager(&ManagerOptions{ Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Appendable: storage, QueryFunc: func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) { return nil, nil }, }) @@ -1535,7 +1584,7 @@ func TestDependencyMap(t *testing.T) { ctx := context.Background() opts := &ManagerOptions{ Context: ctx, - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), } expr, err := parser.ParseExpr("sum by (user) (rate(requests[1m]))") @@ -1544,7 +1593,7 @@ func TestDependencyMap(t *testing.T) { expr, err = parser.ParseExpr("user:requests:rate1m <= 0") require.NoError(t, err) - rule2 := NewAlertingRule("ZeroRequests", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, log.NewNopLogger()) + rule2 := NewAlertingRule("ZeroRequests", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, promslog.NewNopLogger()) expr, err = parser.ParseExpr("sum by (user) (rate(requests[5m]))") require.NoError(t, err) @@ -1584,7 +1633,7 @@ func TestNoDependency(t *testing.T) { ctx := context.Background() opts := &ManagerOptions{ Context: ctx, - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), } expr, err := parser.ParseExpr("sum by (user) (rate(requests[1m]))") @@ -1607,7 +1656,7 @@ func TestDependenciesEdgeCases(t *testing.T) { ctx := context.Background() opts := &ManagerOptions{ Context: ctx, - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), } t.Run("empty group", func(t *testing.T) { @@ -1765,7 +1814,7 @@ func TestNoMetricSelector(t *testing.T) { ctx := context.Background() opts := &ManagerOptions{ Context: ctx, - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), } expr, err := parser.ParseExpr("sum by (user) (rate(requests[1m]))") @@ -1794,7 +1843,7 @@ func TestDependentRulesWithNonMetricExpression(t *testing.T) { ctx := context.Background() opts := &ManagerOptions{ Context: ctx, - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), } expr, err := parser.ParseExpr("sum by (user) (rate(requests[1m]))") @@ -1803,7 +1852,7 @@ func TestDependentRulesWithNonMetricExpression(t *testing.T) { expr, err = parser.ParseExpr("user:requests:rate1m <= 0") require.NoError(t, err) - rule2 := NewAlertingRule("ZeroRequests", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, log.NewNopLogger()) + rule2 := NewAlertingRule("ZeroRequests", expr, 0, 0, labels.Labels{}, 
labels.Labels{}, labels.EmptyLabels(), "", true, promslog.NewNopLogger()) expr, err = parser.ParseExpr("3") require.NoError(t, err) @@ -1826,7 +1875,7 @@ func TestRulesDependentOnMetaMetrics(t *testing.T) { ctx := context.Background() opts := &ManagerOptions{ Context: ctx, - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), } // This rule is not dependent on any other rules in its group but it does depend on `ALERTS`, which is produced by @@ -1855,7 +1904,7 @@ func TestDependencyMapUpdatesOnGroupUpdate(t *testing.T) { files := []string{"fixtures/rules.yaml"} ruleManager := NewManager(&ManagerOptions{ Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), }) ruleManager.start() @@ -2107,7 +2156,7 @@ func TestUpdateWhenStopped(t *testing.T) { files := []string{"fixtures/rules.yaml"} ruleManager := NewManager(&ManagerOptions{ Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), }) ruleManager.start() err := ruleManager.Update(10*time.Second, files, labels.EmptyLabels(), "", nil) @@ -2129,7 +2178,7 @@ func optsFactory(storage storage.Storage, maxInflight, inflightQueries *atomic.I return &ManagerOptions{ Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), ConcurrentEvalsEnabled: concurrent, MaxConcurrentEvals: maxConcurrent, Appendable: storage, @@ -2158,3 +2207,18 @@ func optsFactory(storage storage.Storage, maxInflight, inflightQueries *atomic.I }, } } + +func TestLabels_FromMaps(t *testing.T) { + mLabels := FromMaps( + map[string]string{"aaa": "101", "bbb": "222"}, + map[string]string{"aaa": "111", "ccc": "333"}, + ) + + expected := labels.New( + labels.Label{Name: "aaa", Value: "111"}, + labels.Label{Name: "bbb", Value: "222"}, + labels.Label{Name: "ccc", Value: "333"}, + ) + + require.Equal(t, expected, mLabels, "unexpected labelset") +} diff --git a/rules/origin_test.go b/rules/origin_test.go index 75c83f9a4e9..0bf428f3c1d 100644 --- a/rules/origin_test.go +++ b/rules/origin_test.go @@ -19,9 +19,10 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/stretchr/testify/require" + "github.com/prometheus/common/promslog" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" @@ -96,7 +97,7 @@ func TestNewRuleDetail(t *testing.T) { labels.EmptyLabels(), labels.EmptyLabels(), "", - true, log.NewNopLogger(), + true, promslog.NewNopLogger(), ) detail := NewRuleDetail(rule) diff --git a/scrape/helpers_test.go b/scrape/helpers_test.go index 116fa5c94bb..12a56d70715 100644 --- a/scrape/helpers_test.go +++ b/scrape/helpers_test.go @@ -43,6 +43,8 @@ func (a nopAppendable) Appender(_ context.Context) storage.Appender { type nopAppender struct{} +func (a nopAppender) SetOptions(opts *storage.AppendOptions) {} + func (a nopAppender) Append(storage.SeriesRef, labels.Labels, int64, float64) (storage.SeriesRef, error) { return 0, nil } @@ -55,6 +57,10 @@ func (a nopAppender) AppendHistogram(storage.SeriesRef, labels.Labels, int64, *h return 0, nil } +func (a nopAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + return 0, nil +} + func (a nopAppender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) { return 0, nil } @@ -78,9 +84,10 @@ func equalFloatSamples(a, b floatSample) bool { } type 
histogramSample struct { - t int64 - h *histogram.Histogram - fh *histogram.FloatHistogram + metric labels.Labels + t int64 + h *histogram.Histogram + fh *histogram.FloatHistogram } type collectResultAppendable struct { @@ -109,6 +116,8 @@ type collectResultAppender struct { pendingMetadata []metadata.Metadata } +func (a *collectResultAppender) SetOptions(opts *storage.AppendOptions) {} + func (a *collectResultAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { a.mtx.Lock() defer a.mtx.Unlock() @@ -146,7 +155,7 @@ func (a *collectResultAppender) AppendExemplar(ref storage.SeriesRef, l labels.L func (a *collectResultAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { a.mtx.Lock() defer a.mtx.Unlock() - a.pendingHistograms = append(a.pendingHistograms, histogramSample{h: h, fh: fh, t: t}) + a.pendingHistograms = append(a.pendingHistograms, histogramSample{h: h, fh: fh, t: t, metric: l}) if a.next == nil { return 0, nil } @@ -154,6 +163,13 @@ func (a *collectResultAppender) AppendHistogram(ref storage.SeriesRef, l labels. return a.next.AppendHistogram(ref, l, t, h, fh) } +func (a *collectResultAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + if h != nil { + return a.AppendHistogram(ref, l, ct, &histogram.Histogram{}, nil) + } + return a.AppendHistogram(ref, l, ct, nil, &histogram.FloatHistogram{}) +} + func (a *collectResultAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { a.mtx.Lock() defer a.mtx.Unlock() diff --git a/scrape/manager.go b/scrape/manager.go index d7786a082bd..f3dad2a0488 100644 --- a/scrape/manager.go +++ b/scrape/manager.go @@ -17,32 +17,32 @@ import ( "errors" "fmt" "hash/fnv" - "io" + "log/slog" "reflect" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/logging" "github.com/prometheus/prometheus/util/osutil" "github.com/prometheus/prometheus/util/pool" ) // NewManager is the Manager constructor. -func NewManager(o *Options, logger log.Logger, newScrapeFailureLogger func(string) (log.Logger, error), app storage.Appendable, registerer prometheus.Registerer) (*Manager, error) { +func NewManager(o *Options, logger *slog.Logger, newScrapeFailureLogger func(string) (*logging.JSONFileLogger, error), app storage.Appendable, registerer prometheus.Registerer) (*Manager, error) { if o == nil { o = &Options{} } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } sm, err := newScrapeMetrics(registerer) @@ -70,8 +70,7 @@ func NewManager(o *Options, logger log.Logger, newScrapeFailureLogger func(strin // Options are the configuration parameters to the scrape manager. type Options struct { - ExtraMetrics bool - NoDefaultPort bool + ExtraMetrics bool // Option used by downstream scraper users like OpenTelemetry Collector // to help lookup metric metadata. 
Should be false for Prometheus. PassMetadataInContext bool @@ -101,7 +100,7 @@ const DefaultNameEscapingScheme = model.ValueEncodingEscaping // when receiving new target groups from the discovery manager. type Manager struct { opts *Options - logger log.Logger + logger *slog.Logger append storage.Appendable graceShut chan struct{} @@ -109,8 +108,8 @@ type Manager struct { mtxScrape sync.Mutex // Guards the fields below. scrapeConfigs map[string]*config.ScrapeConfig scrapePools map[string]*scrapePool - newScrapeFailureLogger func(string) (log.Logger, error) - scrapeFailureLoggers map[string]log.Logger + newScrapeFailureLogger func(string) (*logging.JSONFileLogger, error) + scrapeFailureLoggers map[string]*logging.JSONFileLogger targetSets map[string][]*targetgroup.Group buffers *pool.Pool @@ -176,21 +175,26 @@ func (m *Manager) reload() { if _, ok := m.scrapePools[setName]; !ok { scrapeConfig, ok := m.scrapeConfigs[setName] if !ok { - level.Error(m.logger).Log("msg", "error reloading target set", "err", "invalid config id:"+setName) + m.logger.Error("error reloading target set", "err", "invalid config id:"+setName) + continue + } + if scrapeConfig.ConvertClassicHistogramsToNHCB && m.opts.EnableCreatedTimestampZeroIngestion { + // TODO(krajorama): fix https://github.com/prometheus/prometheus/issues/15137 + m.logger.Error("error reloading target set", "err", "cannot convert classic histograms to native histograms with custom buckets and ingest created timestamp zero samples at the same time due to https://github.com/prometheus/prometheus/issues/15137") continue } m.metrics.targetScrapePools.Inc() - sp, err := newScrapePool(scrapeConfig, m.append, m.offsetSeed, log.With(m.logger, "scrape_pool", setName), m.buffers, m.opts, m.metrics) + sp, err := newScrapePool(scrapeConfig, m.append, m.offsetSeed, m.logger.With("scrape_pool", setName), m.buffers, m.opts, m.metrics) if err != nil { m.metrics.targetScrapePoolsFailed.Inc() - level.Error(m.logger).Log("msg", "error creating new scrape pool", "err", err, "scrape_pool", setName) + m.logger.Error("error creating new scrape pool", "err", err, "scrape_pool", setName) continue } m.scrapePools[setName] = sp if l, ok := m.scrapeFailureLoggers[scrapeConfig.ScrapeFailureLogFile]; ok { sp.SetScrapeFailureLogger(l) } else { - level.Error(sp.logger).Log("msg", "No logger found. This is a bug in Prometheus that should be reported upstream.", "scrape_pool", setName) + sp.logger.Error("No logger found. This is a bug in Prometheus that should be reported upstream.", "scrape_pool", setName) } } @@ -247,7 +251,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { } c := make(map[string]*config.ScrapeConfig) - scrapeFailureLoggers := map[string]log.Logger{ + scrapeFailureLoggers := map[string]*logging.JSONFileLogger{ "": nil, // Emptying the file name sets the scrape logger to nil. } for _, scfg := range scfgs { @@ -255,23 +259,23 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { if _, ok := scrapeFailureLoggers[scfg.ScrapeFailureLogFile]; !ok { // We promise to reopen the file on each reload. 
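Scrape-failure logging now works with the concrete *logging.JSONFileLogger type throughout: one logger per failure log file, shared across scrape pools, enriched with job_name/target context, and explicitly closed when the configuration is reloaded (see the Close handling that follows). The general shape of such a logger can be sketched with the standard library alone (newScrapeFailureLogger, the file name and the message below are illustrative stand-ins, not the util/logging implementation):

package main

import (
	"log/slog"
	"os"
)

// newScrapeFailureLogger is a hypothetical stand-in for the real
// util/logging.NewJSONFileLogger: one JSON-lines logger per failure log file.
func newScrapeFailureLogger(path string) (*slog.Logger, *os.File, error) {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o644)
	if err != nil {
		return nil, nil, err
	}
	return slog.New(slog.NewJSONHandler(f, nil)), f, nil
}

func main() {
	logger, f, err := newScrapeFailureLogger("scrape_failures.log")
	if err != nil {
		panic(err)
	}
	defer f.Close() // the manager likewise closes the old loggers when the config is reloaded

	// Pool- and target-scoped context, mirroring SetScrapeFailureLogger in the diff.
	logger = logger.With("job_name", "node", "target", "http://localhost:9100/metrics")
	logger.Error("Get \"http://localhost:9100/metrics\": connection refused")
}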
var ( - l log.Logger - err error + logger *logging.JSONFileLogger + err error ) if m.newScrapeFailureLogger != nil { - if l, err = m.newScrapeFailureLogger(scfg.ScrapeFailureLogFile); err != nil { + if logger, err = m.newScrapeFailureLogger(scfg.ScrapeFailureLogFile); err != nil { return err } } - scrapeFailureLoggers[scfg.ScrapeFailureLogFile] = l + scrapeFailureLoggers[scfg.ScrapeFailureLogFile] = logger } } m.scrapeConfigs = c oldScrapeFailureLoggers := m.scrapeFailureLoggers for _, s := range oldScrapeFailureLoggers { - if closer, ok := s.(io.Closer); ok { - defer closer.Close() + if s != nil { + defer s.Close() } } @@ -291,7 +295,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { case !reflect.DeepEqual(sp.config, cfg): err := sp.reload(cfg) if err != nil { - level.Error(m.logger).Log("msg", "error reloading scrape pool", "err", err, "scrape_pool", name) + m.logger.Error("error reloading scrape pool", "err", err, "scrape_pool", name) failed = true } fallthrough @@ -299,7 +303,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { if l, ok := m.scrapeFailureLoggers[cfg.ScrapeFailureLogFile]; ok { sp.SetScrapeFailureLogger(l) } else { - level.Error(sp.logger).Log("msg", "No logger found. This is a bug in Prometheus that should be reported upstream.", "scrape_pool", name) + sp.logger.Error("No logger found. This is a bug in Prometheus that should be reported upstream.", "scrape_pool", name) } } } diff --git a/scrape/manager_test.go b/scrape/manager_test.go index ba32f36cf42..c3544f63445 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -14,6 +14,7 @@ package scrape import ( + "bytes" "context" "fmt" "net/http" @@ -26,33 +27,42 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/gogo/protobuf/proto" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "google.golang.org/protobuf/types/known/timestamppb" "gopkg.in/yaml.v2" + "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" _ "github.com/prometheus/prometheus/discovery/file" "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" + "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/util/runutil" "github.com/prometheus/prometheus/util/testutil" ) +func init() { + // This can be removed when the default validation scheme in common is updated. + model.NameValidationScheme = model.UTF8Validation +} + func TestPopulateLabels(t *testing.T) { cases := []struct { - in labels.Labels - cfg *config.ScrapeConfig - noDefaultPort bool - res labels.Labels - resOrig labels.Labels - err string + in labels.Labels + cfg *config.ScrapeConfig + res labels.Labels + resOrig labels.Labels + err string }{ // Regular population of scrape config options. 
{ @@ -106,8 +116,8 @@ func TestPopulateLabels(t *testing.T) { ScrapeTimeout: model.Duration(time.Second), }, res: labels.FromMap(map[string]string{ - model.AddressLabel: "1.2.3.4:80", - model.InstanceLabel: "1.2.3.4:80", + model.AddressLabel: "1.2.3.4", + model.InstanceLabel: "1.2.3.4", model.SchemeLabel: "http", model.MetricsPathLabel: "/custom", model.JobLabel: "custom-job", @@ -137,7 +147,7 @@ func TestPopulateLabels(t *testing.T) { ScrapeTimeout: model.Duration(time.Second), }, res: labels.FromMap(map[string]string{ - model.AddressLabel: "[::1]:443", + model.AddressLabel: "[::1]", model.InstanceLabel: "custom-instance", model.SchemeLabel: "https", model.MetricsPathLabel: "/metrics", @@ -360,7 +370,6 @@ func TestPopulateLabels(t *testing.T) { ScrapeInterval: model.Duration(time.Second), ScrapeTimeout: model.Duration(time.Second), }, - noDefaultPort: true, res: labels.FromMap(map[string]string{ model.AddressLabel: "1.2.3.4", model.InstanceLabel: "1.2.3.4", @@ -379,7 +388,7 @@ func TestPopulateLabels(t *testing.T) { model.ScrapeTimeoutLabel: "1s", }), }, - // Remove default port (http). + // verify that the default port is not removed (http). { in: labels.FromMap(map[string]string{ model.AddressLabel: "1.2.3.4:80", @@ -391,9 +400,8 @@ func TestPopulateLabels(t *testing.T) { ScrapeInterval: model.Duration(time.Second), ScrapeTimeout: model.Duration(time.Second), }, - noDefaultPort: true, res: labels.FromMap(map[string]string{ - model.AddressLabel: "1.2.3.4", + model.AddressLabel: "1.2.3.4:80", model.InstanceLabel: "1.2.3.4:80", model.SchemeLabel: "http", model.MetricsPathLabel: "/metrics", @@ -410,7 +418,7 @@ func TestPopulateLabels(t *testing.T) { model.ScrapeTimeoutLabel: "1s", }), }, - // Remove default port (https). + // verify that the default port is not removed (https). { in: labels.FromMap(map[string]string{ model.AddressLabel: "1.2.3.4:443", @@ -422,9 +430,8 @@ func TestPopulateLabels(t *testing.T) { ScrapeInterval: model.Duration(time.Second), ScrapeTimeout: model.Duration(time.Second), }, - noDefaultPort: true, res: labels.FromMap(map[string]string{ - model.AddressLabel: "1.2.3.4", + model.AddressLabel: "1.2.3.4:443", model.InstanceLabel: "1.2.3.4:443", model.SchemeLabel: "https", model.MetricsPathLabel: "/metrics", @@ -445,7 +452,7 @@ func TestPopulateLabels(t *testing.T) { for _, c := range cases { in := c.in.Copy() - res, orig, err := PopulateLabels(labels.NewBuilder(c.in), c.cfg, c.noDefaultPort) + res, orig, err := PopulateLabels(labels.NewBuilder(c.in), c.cfg) if c.err != "" { require.EqualError(t, err, c.err) } else { @@ -716,37 +723,256 @@ scrape_configs: require.ElementsMatch(t, []string{"job1", "job3"}, scrapeManager.ScrapePools()) } -// TestManagerCTZeroIngestion tests scrape manager for CT cases. +func setupScrapeManager(t *testing.T, honorTimestamps, enableCTZeroIngestion bool) (*collectResultAppender, *Manager) { + app := &collectResultAppender{} + scrapeManager, err := NewManager( + &Options{ + EnableCreatedTimestampZeroIngestion: enableCTZeroIngestion, + skipOffsetting: true, + }, + promslog.New(&promslog.Config{}), + nil, + &collectResultAppendable{app}, + prometheus.NewRegistry(), + ) + require.NoError(t, err) + + require.NoError(t, scrapeManager.ApplyConfig(&config.Config{ + GlobalConfig: config.GlobalConfig{ + // Disable regular scrapes. 
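With the no-default-port behaviour gone, PopulateLabels takes the target address verbatim: it no longer appends :80/:443 based on the scheme, and it no longer strips those ports either, so __address__ and the defaulted instance label are simply whatever the target group supplied. A trivial sketch of the new expectation (populateAddress is a hypothetical stand-in covering only the defaulting path, not the real PopulateLabels):

package main

import "fmt"

// populateAddress: the address is used as-is, and instance defaults to it
// when the target did not set an explicit instance label.
func populateAddress(address string) (addressLabel, instanceLabel string) {
	return address, address
}

func main() {
	for _, addr := range []string{"1.2.3.4", "1.2.3.4:80", "[::1]:443"} {
		a, i := populateAddress(addr)
		fmt.Printf("__address__=%q instance=%q\n", a, i)
	}
}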
+ ScrapeInterval: model.Duration(9999 * time.Minute), + ScrapeTimeout: model.Duration(5 * time.Second), + ScrapeProtocols: []config.ScrapeProtocol{config.OpenMetricsText1_0_0, config.PrometheusProto}, + }, + ScrapeConfigs: []*config.ScrapeConfig{{JobName: "test", HonorTimestamps: honorTimestamps}}, + })) + + return app, scrapeManager +} + +func setupTestServer(t *testing.T, typ string, toWrite []byte) *httptest.Server { + once := sync.Once{} + + server := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fail := true + once.Do(func() { + fail = false + w.Header().Set("Content-Type", typ) + w.Write(toWrite) + }) + + if fail { + w.WriteHeader(http.StatusInternalServerError) + } + }), + ) + + t.Cleanup(func() { server.Close() }) + + return server +} + +// TestManagerCTZeroIngestion tests scrape manager for various CT cases. func TestManagerCTZeroIngestion(t *testing.T) { - const mName = "expected_counter" + const ( + // _total suffix is required, otherwise expfmt with OMText will mark metric as "unknown" + expectedMetricName = "expected_metric_total" + expectedCreatedMetricName = "expected_metric_created" + expectedSampleValue = 17.0 + ) + + for _, testFormat := range []config.ScrapeProtocol{config.PrometheusProto, config.OpenMetricsText1_0_0} { + t.Run(fmt.Sprintf("format=%s", testFormat), func(t *testing.T) { + for _, testWithCT := range []bool{false, true} { + t.Run(fmt.Sprintf("withCT=%v", testWithCT), func(t *testing.T) { + for _, testCTZeroIngest := range []bool{false, true} { + t.Run(fmt.Sprintf("ctZeroIngest=%v", testCTZeroIngest), func(t *testing.T) { + sampleTs := time.Now() + ctTs := time.Time{} + if testWithCT { + ctTs = sampleTs.Add(-2 * time.Minute) + } + + // TODO(bwplotka): Add more types than just counter? + encoded := prepareTestEncodedCounter(t, testFormat, expectedMetricName, expectedSampleValue, sampleTs, ctTs) + app, scrapeManager := setupScrapeManager(t, true, testCTZeroIngest) + + // Perform the test. + doOneScrape(t, scrapeManager, app, setupTestServer(t, config.ScrapeProtocolsHeaders[testFormat], encoded)) + + // Verify results. + // Verify what we got vs expectations around CT injection. + samples := findSamplesForMetric(app.resultFloats, expectedMetricName) + if testWithCT && testCTZeroIngest { + require.Len(t, samples, 2) + require.Equal(t, 0.0, samples[0].f) + require.Equal(t, timestamp.FromTime(ctTs), samples[0].t) + require.Equal(t, expectedSampleValue, samples[1].f) + require.Equal(t, timestamp.FromTime(sampleTs), samples[1].t) + } else { + require.Len(t, samples, 1) + require.Equal(t, expectedSampleValue, samples[0].f) + require.Equal(t, timestamp.FromTime(sampleTs), samples[0].t) + } + + // Verify what we got vs expectations around additional _created series for OM text. + // enableCTZeroInjection also kills that _created line. + createdSeriesSamples := findSamplesForMetric(app.resultFloats, expectedCreatedMetricName) + if testFormat == config.OpenMetricsText1_0_0 && testWithCT && !testCTZeroIngest { + // For OM Text, when counter has CT, and feature flag disabled we should see _created lines. + require.Len(t, createdSeriesSamples, 1) + // Conversion taken from common/expfmt.writeOpenMetricsFloat. 
+ // We don't check the ct timestamp as explicit ts was not implemented in expfmt.Encoder, + // but exists in OM https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#:~:text=An%20example%20with%20a%20Metric%20with%20no%20labels%2C%20and%20a%20MetricPoint%20with%20a%20timestamp%20and%20a%20created + // We can implement this, but we want to potentially get rid of OM 1.0 CT lines + require.Equal(t, float64(timestamppb.New(ctTs).AsTime().UnixNano())/1e9, createdSeriesSamples[0].f) + } else { + require.Empty(t, createdSeriesSamples) + } + }) + } + }) + } + }) + } +} + +func prepareTestEncodedCounter(t *testing.T, format config.ScrapeProtocol, mName string, v float64, ts, ct time.Time) (encoded []byte) { + t.Helper() + + counter := &dto.Counter{Value: proto.Float64(v)} + if !ct.IsZero() { + counter.CreatedTimestamp = timestamppb.New(ct) + } + ctrType := dto.MetricType_COUNTER + inputMetric := &dto.MetricFamily{ + Name: proto.String(mName), + Type: &ctrType, + Metric: []*dto.Metric{{ + TimestampMs: proto.Int64(timestamp.FromTime(ts)), + Counter: counter, + }}, + } + switch format { + case config.PrometheusProto: + return protoMarshalDelimited(t, inputMetric) + case config.OpenMetricsText1_0_0: + buf := &bytes.Buffer{} + require.NoError(t, expfmt.NewEncoder(buf, expfmt.NewFormat(expfmt.TypeOpenMetrics), expfmt.WithCreatedLines(), expfmt.WithUnit()).Encode(inputMetric)) + _, _ = buf.WriteString("# EOF") + + t.Log("produced OM text to expose:", buf.String()) + return buf.Bytes() + default: + t.Fatalf("not implemented format: %v", format) + return nil + } +} + +func doOneScrape(t *testing.T, manager *Manager, appender *collectResultAppender, server *httptest.Server) { + t.Helper() + + serverURL, err := url.Parse(server.URL) + require.NoError(t, err) + + // Add fake target directly into tsets + reload + manager.updateTsets(map[string][]*targetgroup.Group{ + "test": {{ + Targets: []model.LabelSet{{ + model.SchemeLabel: model.LabelValue(serverURL.Scheme), + model.AddressLabel: model.LabelValue(serverURL.Host), + }}, + }}, + }) + manager.reload() + + // Wait for one scrape. + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error { + appender.mtx.Lock() + defer appender.mtx.Unlock() + + // Check if scrape happened and grab the relevant samples. + if len(appender.resultFloats) > 0 { + return nil + } + return fmt.Errorf("expected some float samples, got none") + }), "after 1 minute") + manager.Stop() +} + +func findSamplesForMetric(floats []floatSample, metricName string) (ret []floatSample) { + for _, f := range floats { + if f.metric.Get(model.MetricNameLabel) == metricName { + ret = append(ret, f) + } + } + return ret +} + +// generateTestHistogram generates the same thing as tsdbutil.GenerateTestHistogram, +// but in the form of dto.Histogram. 
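prepareTestEncodedCounter above renders the same counter either as delimited protobuf or as OpenMetrics text; in the text case the expfmt encoder options WithCreatedLines() and WithUnit() are what make the *_created line appear, which is exactly the series the created-timestamp assertions look for. A standalone sketch of that encoding path (metric name and values are arbitrary; the expfmt calls mirror the ones used in the test):

package main

import (
	"bytes"
	"fmt"
	"time"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	name := "expected_metric_total"
	value := 17.0
	mtype := dto.MetricType_COUNTER

	family := &dto.MetricFamily{
		Name: &name,
		Type: &mtype,
		Metric: []*dto.Metric{{
			Counter: &dto.Counter{
				Value: &value,
				// With WithCreatedLines(), this becomes an "expected_metric_created"
				// line in the OpenMetrics output.
				CreatedTimestamp: timestamppb.New(time.Now().Add(-2 * time.Minute)),
			},
		}},
	}

	buf := &bytes.Buffer{}
	enc := expfmt.NewEncoder(buf, expfmt.NewFormat(expfmt.TypeOpenMetrics), expfmt.WithCreatedLines(), expfmt.WithUnit())
	if err := enc.Encode(family); err != nil {
		panic(err)
	}
	buf.WriteString("# EOF")

	fmt.Println(buf.String())
}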
+func generateTestHistogram(i int) *dto.Histogram { + helper := tsdbutil.GenerateTestHistogram(i) + h := &dto.Histogram{} + h.SampleCount = proto.Uint64(helper.Count) + h.SampleSum = proto.Float64(helper.Sum) + h.Schema = proto.Int32(helper.Schema) + h.ZeroThreshold = proto.Float64(helper.ZeroThreshold) + h.ZeroCount = proto.Uint64(helper.ZeroCount) + h.PositiveSpan = make([]*dto.BucketSpan, len(helper.PositiveSpans)) + for i, span := range helper.PositiveSpans { + h.PositiveSpan[i] = &dto.BucketSpan{ + Offset: proto.Int32(span.Offset), + Length: proto.Uint32(span.Length), + } + } + h.PositiveDelta = helper.PositiveBuckets + h.NegativeSpan = make([]*dto.BucketSpan, len(helper.NegativeSpans)) + for i, span := range helper.NegativeSpans { + h.NegativeSpan[i] = &dto.BucketSpan{ + Offset: proto.Int32(span.Offset), + Length: proto.Uint32(span.Length), + } + } + h.NegativeDelta = helper.NegativeBuckets + return h +} + +func TestManagerCTZeroIngestionHistogram(t *testing.T) { + const mName = "expected_histogram" for _, tc := range []struct { name string - counterSample *dto.Counter + inputHistSample *dto.Histogram enableCTZeroIngestion bool }{ { - name: "disabled with CT on counter", - counterSample: &dto.Counter{ - Value: proto.Float64(1.0), - // Timestamp does not matter as long as it exists in this test. - CreatedTimestamp: timestamppb.Now(), - }, + name: "disabled with CT on histogram", + inputHistSample: func() *dto.Histogram { + h := generateTestHistogram(0) + h.CreatedTimestamp = timestamppb.Now() + return h + }(), + enableCTZeroIngestion: false, }, { - name: "enabled with CT on counter", - counterSample: &dto.Counter{ - Value: proto.Float64(1.0), - // Timestamp does not matter as long as it exists in this test. - CreatedTimestamp: timestamppb.Now(), - }, + name: "enabled with CT on histogram", + inputHistSample: func() *dto.Histogram { + h := generateTestHistogram(0) + h.CreatedTimestamp = timestamppb.Now() + return h + }(), enableCTZeroIngestion: true, }, { - name: "enabled without CT on counter", - counterSample: &dto.Counter{ - Value: proto.Float64(1.0), - }, + name: "enabled without CT on histogram", + inputHistSample: func() *dto.Histogram { + h := generateTestHistogram(0) + return h + }(), enableCTZeroIngestion: true, }, } { @@ -755,9 +981,10 @@ func TestManagerCTZeroIngestion(t *testing.T) { scrapeManager, err := NewManager( &Options{ EnableCreatedTimestampZeroIngestion: tc.enableCTZeroIngestion, + EnableNativeHistogramsIngestion: true, skipOffsetting: true, }, - log.NewLogfmtLogger(os.Stderr), + promslog.New(&promslog.Config{}), nil, &collectResultAppendable{app}, prometheus.NewRegistry(), @@ -780,16 +1007,16 @@ func TestManagerCTZeroIngestion(t *testing.T) { // Start fake HTTP target to that allow one scrape only. server := httptest.NewServer( http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fail := true + fail := true // TODO(bwplotka): Kill or use? once.Do(func() { fail = false w.Header().Set("Content-Type", `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`) - ctrType := dto.MetricType_COUNTER + ctrType := dto.MetricType_HISTOGRAM w.Write(protoMarshalDelimited(t, &dto.MetricFamily{ Name: proto.String(mName), Type: &ctrType, - Metric: []*dto.Metric{{Counter: tc.counterSample}}, + Metric: []*dto.Metric{{Histogram: tc.inputHistSample}}, })) }) @@ -815,7 +1042,8 @@ func TestManagerCTZeroIngestion(t *testing.T) { }) scrapeManager.reload() - var got []float64 + var got []histogramSample + // Wait for one scrape. 
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) defer cancel() @@ -823,32 +1051,35 @@ func TestManagerCTZeroIngestion(t *testing.T) { app.mtx.Lock() defer app.mtx.Unlock() - // Check if scrape happened and grab the relevant samples, they have to be there - or it's a bug + // Check if scrape happened and grab the relevant histograms, they have to be there - or it's a bug // and it's not worth waiting. - for _, f := range app.resultFloats { - if f.metric.Get(model.MetricNameLabel) == mName { - got = append(got, f.f) + for _, h := range app.resultHistograms { + if h.metric.Get(model.MetricNameLabel) == mName { + got = append(got, h) } } - if len(app.resultFloats) > 0 { + if len(app.resultHistograms) > 0 { return nil } - return fmt.Errorf("expected some samples, got none") + return fmt.Errorf("expected some histogram samples, got none") }), "after 1 minute") scrapeManager.Stop() - // Check for zero samples, assuming we only injected always one sample. + // Check for zero samples, assuming we only injected always one histogram sample. // Did it contain CT to inject? If yes, was CT zero enabled? - if tc.counterSample.CreatedTimestamp.IsValid() && tc.enableCTZeroIngestion { + if tc.inputHistSample.CreatedTimestamp.IsValid() && tc.enableCTZeroIngestion { require.Len(t, got, 2) - require.Equal(t, 0.0, got[0]) - require.Equal(t, tc.counterSample.GetValue(), got[1]) + // Zero sample. + require.Equal(t, histogram.Histogram{}, *got[0].h) + // Quick soft check to make sure it's the same sample or at least not zero. + require.Equal(t, tc.inputHistSample.GetSampleSum(), got[1].h.Sum) return } // Expect only one, valid sample. require.Len(t, got, 1) - require.Equal(t, tc.counterSample.GetValue(), got[0]) + // Quick soft check to make sure it's the same sample or at least not zero. + require.Equal(t, tc.inputHistSample.GetSampleSum(), got[0].h.Sum) }) } } @@ -894,7 +1125,7 @@ func runManagers(t *testing.T, ctx context.Context) (*discovery.Manager, *Manage require.NoError(t, err) discoveryManager := discovery.NewManager( ctx, - log.NewNopLogger(), + promslog.NewNopLogger(), reg, sdMetrics, discovery.Updatert(100*time.Millisecond), @@ -1181,7 +1412,7 @@ scrape_configs: } // TestOnlyStaleTargetsAreDropped makes sure that when a job has multiple providers, when one of them should no -// longer discover targets, only the stale targets of that provier are dropped. +// longer discover targets, only the stale targets of that provider are dropped. func TestOnlyStaleTargetsAreDropped(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/scrape/scrape.go b/scrape/scrape.go index ea98432be6d..eeab208d658 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" "io" + "log/slog" "math" "net/http" "reflect" @@ -29,11 +30,10 @@ import ( "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/klauspost/compress/gzip" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" "github.com/prometheus/prometheus/config" @@ -47,6 +47,7 @@ import ( "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/logging" "github.com/prometheus/prometheus/util/pool" ) @@ -63,7 +64,7 @@ var errNameLabelMandatory = fmt.Errorf("missing metric name (%s label)", labels. 
// scrapePool manages scrapes for sets of targets. type scrapePool struct { appendable storage.Appendable - logger log.Logger + logger *slog.Logger cancel context.CancelFunc httpOpts []config_util.HTTPClientOption @@ -87,11 +88,9 @@ type scrapePool struct { // Constructor for new scrape loops. This is settable for testing convenience. newLoop func(scrapeLoopOptions) loop - noDefaultPort bool - metrics *scrapeMetrics - scrapeFailureLogger log.Logger + scrapeFailureLogger *logging.JSONFileLogger scrapeFailureLoggerMtx sync.RWMutex } @@ -113,8 +112,10 @@ type scrapeLoopOptions struct { trackTimestampsStaleness bool interval time.Duration timeout time.Duration - scrapeClassicHistograms bool + alwaysScrapeClassicHist bool + convertClassicHistToNHCB bool validationScheme model.ValidationScheme + fallbackScrapeProtocol string mrc []*relabel.Config cache *scrapeCache @@ -126,9 +127,9 @@ const maxAheadTime = 10 * time.Minute // returning an empty label set is interpreted as "drop". type labelsMutator func(labels.Labels) labels.Labels -func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger log.Logger, buffers *pool.Pool, options *Options, metrics *scrapeMetrics) (*scrapePool, error) { +func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger *slog.Logger, buffers *pool.Pool, options *Options, metrics *scrapeMetrics) (*scrapePool, error) { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, options.HTTPClientOptions...) @@ -149,7 +150,6 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed logger: logger, metrics: metrics, httpOpts: options.HTTPClientOptions, - noDefaultPort: options.NoDefaultPort, } sp.newLoop = func(opts scrapeLoopOptions) loop { // Update the targets retrieval function for metadata to a new scrape cache. 
@@ -162,7 +162,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed return newScrapeLoop( ctx, opts.scraper, - log.With(logger, "target", opts.target), + logger.With("target", opts.target), buffers, func(l labels.Labels) labels.Labels { return mutateSampleLabels(l, opts.target, opts.honorLabels, opts.mrc) @@ -181,7 +181,8 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed opts.labelLimits, opts.interval, opts.timeout, - opts.scrapeClassicHistograms, + opts.alwaysScrapeClassicHist, + opts.convertClassicHistToNHCB, options.EnableNativeHistogramsIngestion, options.EnableCreatedTimestampZeroIngestion, options.ExtraMetrics, @@ -191,6 +192,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed metrics, options.skipOffsetting, opts.validationScheme, + opts.fallbackScrapeProtocol, ) } sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit)) @@ -221,11 +223,11 @@ func (sp *scrapePool) DroppedTargetsCount() int { return sp.droppedTargetsCount } -func (sp *scrapePool) SetScrapeFailureLogger(l log.Logger) { +func (sp *scrapePool) SetScrapeFailureLogger(l *logging.JSONFileLogger) { sp.scrapeFailureLoggerMtx.Lock() defer sp.scrapeFailureLoggerMtx.Unlock() if l != nil { - l = log.With(l, "job_name", sp.config.JobName) + l.With("job_name", sp.config.JobName) } sp.scrapeFailureLogger = l @@ -236,7 +238,7 @@ func (sp *scrapePool) SetScrapeFailureLogger(l log.Logger) { } } -func (sp *scrapePool) getScrapeFailureLogger() log.Logger { +func (sp *scrapePool) getScrapeFailureLogger() *logging.JSONFileLogger { sp.scrapeFailureLoggerMtx.RLock() defer sp.scrapeFailureLoggerMtx.RUnlock() return sp.scrapeFailureLogger @@ -327,11 +329,12 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { enableCompression = sp.config.EnableCompression trackTimestampsStaleness = sp.config.TrackTimestampsStaleness mrc = sp.config.MetricRelabelConfigs + fallbackScrapeProtocol = sp.config.ScrapeFallbackProtocol.HeaderMediaType() ) - validationScheme := model.LegacyValidation - if sp.config.MetricNameValidationScheme == config.UTF8ValidationConfig { - validationScheme = model.UTF8Validation + validationScheme := model.UTF8Validation + if sp.config.MetricNameValidationScheme == config.LegacyValidationConfig { + validationScheme = model.LegacyValidation } sp.targetMtx.Lock() @@ -373,6 +376,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { interval: interval, timeout: timeout, validationScheme: validationScheme, + fallbackScrapeProtocol: fallbackScrapeProtocol, }) ) if err != nil { @@ -429,9 +433,9 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { sp.droppedTargets = []*Target{} sp.droppedTargetsCount = 0 for _, tg := range tgs { - targets, failures := TargetsFromGroup(tg, sp.config, sp.noDefaultPort, targets, lb) + targets, failures := TargetsFromGroup(tg, sp.config, targets, lb) for _, err := range failures { - level.Error(sp.logger).Log("msg", "Creating target failed", "err", err) + sp.logger.Error("Creating target failed", "err", err) } sp.metrics.targetSyncFailed.WithLabelValues(sp.config.JobName).Add(float64(len(failures))) for _, t := range targets { @@ -482,12 +486,14 @@ func (sp *scrapePool) sync(targets []*Target) { enableCompression = sp.config.EnableCompression trackTimestampsStaleness = sp.config.TrackTimestampsStaleness mrc = sp.config.MetricRelabelConfigs - scrapeClassicHistograms = sp.config.ScrapeClassicHistograms + fallbackScrapeProtocol = 
sp.config.ScrapeFallbackProtocol.HeaderMediaType() + alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistograms + convertClassicHistToNHCB = sp.config.ConvertClassicHistogramsToNHCB ) - validationScheme := model.LegacyValidation - if sp.config.MetricNameValidationScheme == config.UTF8ValidationConfig { - validationScheme = model.UTF8Validation + validationScheme := model.UTF8Validation + if sp.config.MetricNameValidationScheme == config.LegacyValidationConfig { + validationScheme = model.LegacyValidation } sp.targetMtx.Lock() @@ -523,7 +529,10 @@ func (sp *scrapePool) sync(targets []*Target) { mrc: mrc, interval: interval, timeout: timeout, - scrapeClassicHistograms: scrapeClassicHistograms, + alwaysScrapeClassicHist: alwaysScrapeClassicHist, + convertClassicHistToNHCB: convertClassicHistToNHCB, + validationScheme: validationScheme, + fallbackScrapeProtocol: fallbackScrapeProtocol, }) if err != nil { l.setForcedError(err) @@ -851,7 +860,7 @@ func (s *targetScraper) readResponse(ctx context.Context, resp *http.Response, w type loop interface { run(errc chan<- error) setForcedError(err error) - setScrapeFailureLogger(log.Logger) + setScrapeFailureLogger(*logging.JSONFileLogger) stop() getCache() *scrapeCache disableEndOfRunStalenessMarkers() @@ -866,8 +875,8 @@ type cacheEntry struct { type scrapeLoop struct { scraper scraper - l log.Logger - scrapeFailureLogger log.Logger + l *slog.Logger + scrapeFailureLogger *logging.JSONFileLogger scrapeFailureLoggerMtx sync.RWMutex cache *scrapeCache lastScrapeSize int @@ -884,8 +893,10 @@ type scrapeLoop struct { labelLimits *labelLimits interval time.Duration timeout time.Duration - scrapeClassicHistograms bool + alwaysScrapeClassicHist bool + convertClassicHistToNHCB bool validationScheme model.ValidationScheme + fallbackScrapeProtocol string // Feature flagged options. 
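Both restartLoops and sync now start from model.UTF8Validation and only fall back to model.LegacyValidation when the scrape config explicitly selects the legacy scheme; the test file's new init() sets the same package-level default. A small illustration of what the two schemes accept (assuming github.com/prometheus/common/model; the metric name is arbitrary):

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	name := model.LabelValue("http.server.request.duration") // dots are only valid under UTF-8 validation

	model.NameValidationScheme = model.UTF8Validation // the new default picked in restartLoops()/sync()
	fmt.Println(model.IsValidMetricName(name))        // true

	model.NameValidationScheme = model.LegacyValidation // opt-in when the scrape config selects the legacy scheme
	fmt.Println(model.IsValidMetricName(name))          // false
}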
enableNativeHistogramIngestion bool @@ -1167,7 +1178,7 @@ func (c *scrapeCache) LengthMetadata() int { func newScrapeLoop(ctx context.Context, sc scraper, - l log.Logger, + l *slog.Logger, buffers *pool.Pool, sampleMutator labelsMutator, reportSampleMutator labelsMutator, @@ -1184,7 +1195,8 @@ func newScrapeLoop(ctx context.Context, labelLimits *labelLimits, interval time.Duration, timeout time.Duration, - scrapeClassicHistograms bool, + alwaysScrapeClassicHist bool, + convertClassicHistToNHCB bool, enableNativeHistogramIngestion bool, enableCTZeroIngestion bool, reportExtraMetrics bool, @@ -1194,9 +1206,10 @@ func newScrapeLoop(ctx context.Context, metrics *scrapeMetrics, skipOffsetting bool, validationScheme model.ValidationScheme, + fallbackScrapeProtocol string, ) *scrapeLoop { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } if buffers == nil { buffers = pool.New(1e3, 1e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }) @@ -1238,7 +1251,8 @@ func newScrapeLoop(ctx context.Context, labelLimits: labelLimits, interval: interval, timeout: timeout, - scrapeClassicHistograms: scrapeClassicHistograms, + alwaysScrapeClassicHist: alwaysScrapeClassicHist, + convertClassicHistToNHCB: convertClassicHistToNHCB, enableNativeHistogramIngestion: enableNativeHistogramIngestion, enableCTZeroIngestion: enableCTZeroIngestion, reportExtraMetrics: reportExtraMetrics, @@ -1246,17 +1260,18 @@ func newScrapeLoop(ctx context.Context, metrics: metrics, skipOffsetting: skipOffsetting, validationScheme: validationScheme, + fallbackScrapeProtocol: fallbackScrapeProtocol, } sl.ctx, sl.cancel = context.WithCancel(ctx) return sl } -func (sl *scrapeLoop) setScrapeFailureLogger(l log.Logger) { +func (sl *scrapeLoop) setScrapeFailureLogger(l *logging.JSONFileLogger) { sl.scrapeFailureLoggerMtx.Lock() defer sl.scrapeFailureLoggerMtx.Unlock() if ts, ok := sl.scraper.(fmt.Stringer); ok && l != nil { - l = log.With(l, "target", ts.String()) + l.With("target", ts.String()) } sl.scrapeFailureLogger = l } @@ -1354,13 +1369,13 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er } err = app.Commit() if err != nil { - level.Error(sl.l).Log("msg", "Scrape commit failed", "err", err) + sl.l.Error("Scrape commit failed", "err", err) } }() defer func() { if err = sl.report(app, appendTime, time.Since(start), total, added, seriesAdded, bytesRead, scrapeErr); err != nil { - level.Warn(sl.l).Log("msg", "Appending scrape report failed", "err", err) + sl.l.Warn("Appending scrape report failed", "err", err) } }() @@ -1370,7 +1385,7 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er if _, _, _, err := sl.append(app, []byte{}, "", appendTime); err != nil { app.Rollback() app = sl.appender(sl.appenderCtx) - level.Warn(sl.l).Log("msg", "Append failed", "err", err) + sl.l.Warn("Append failed", "err", err) } if errc != nil { errc <- forcedErr @@ -1403,10 +1418,10 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er } bytesRead = len(b) } else { - level.Debug(sl.l).Log("msg", "Scrape failed", "err", scrapeErr) + sl.l.Debug("Scrape failed", "err", scrapeErr) sl.scrapeFailureLoggerMtx.RLock() if sl.scrapeFailureLogger != nil { - sl.scrapeFailureLogger.Log("err", scrapeErr) + sl.scrapeFailureLogger.Log(context.Background(), slog.LevelError, scrapeErr.Error()) } sl.scrapeFailureLoggerMtx.RUnlock() if errc != nil { @@ -1423,13 +1438,13 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er if 
appErr != nil { app.Rollback() app = sl.appender(sl.appenderCtx) - level.Debug(sl.l).Log("msg", "Append failed", "err", appErr) + sl.l.Debug("Append failed", "err", appErr) // The append failed, probably due to a parse error or sample limit. // Call sl.append again with an empty scrape to trigger stale markers. if _, _, _, err := sl.append(app, []byte{}, "", appendTime); err != nil { app.Rollback() app = sl.appender(sl.appenderCtx) - level.Warn(sl.l).Log("msg", "Append failed", "err", err) + sl.l.Warn("Append failed", "err", err) } } @@ -1502,16 +1517,16 @@ func (sl *scrapeLoop) endOfRunStaleness(last time.Time, ticker *time.Ticker, int } err = app.Commit() if err != nil { - level.Warn(sl.l).Log("msg", "Stale commit failed", "err", err) + sl.l.Warn("Stale commit failed", "err", err) } }() if _, _, _, err = sl.append(app, []byte{}, "", staleTime); err != nil { app.Rollback() app = sl.appender(sl.appenderCtx) - level.Warn(sl.l).Log("msg", "Stale append failed", "err", err) + sl.l.Warn("Stale append failed", "err", err) } if err = sl.reportStale(app, staleTime); err != nil { - level.Warn(sl.l).Log("msg", "Stale report failed", "err", err) + sl.l.Warn("Stale report failed", "err", err) } } @@ -1537,18 +1552,56 @@ type appendErrors struct { numExemplarOutOfOrder int } +// Update the stale markers. +func (sl *scrapeLoop) updateStaleMarkers(app storage.Appender, defTime int64) (err error) { + sl.cache.forEachStale(func(lset labels.Labels) bool { + // Series no longer exposed, mark it stale. + app.SetOptions(&storage.AppendOptions{DiscardOutOfOrder: true}) + _, err = app.Append(0, lset, defTime, math.Float64frombits(value.StaleNaN)) + app.SetOptions(nil) + switch { + case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp): + // Do not count these in logging, as this is expected if a target + // goes away and comes back again with a new scrape loop. + err = nil + } + return err == nil + }) + return +} + func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) { - p, err := textparse.New(b, contentType, sl.scrapeClassicHistograms, sl.symbolTable) + defTime := timestamp.FromTime(ts) + + if len(b) == 0 { + // Empty scrape. Just update the stale makers and swap the cache (but don't flush it). 
+ err = sl.updateStaleMarkers(app, defTime) + sl.cache.iterDone(false) + return + } + + p, err := textparse.New(b, contentType, sl.fallbackScrapeProtocol, sl.alwaysScrapeClassicHist, sl.enableCTZeroIngestion, sl.symbolTable) + if p == nil { + sl.l.Error( + "Failed to determine correct type of scrape target.", + "content_type", contentType, + "fallback_media_type", sl.fallbackScrapeProtocol, + "err", err, + ) + return + } + if sl.convertClassicHistToNHCB { + p = textparse.NewNHCBParser(p, sl.symbolTable, sl.alwaysScrapeClassicHist) + } if err != nil { - level.Debug(sl.l).Log( - "msg", "Invalid content type on scrape, using prometheus parser as fallback.", + sl.l.Debug( + "Invalid content type on scrape, using fallback setting.", "content_type", contentType, + "fallback_media_type", sl.fallbackScrapeProtocol, "err", err, ) } - var ( - defTime = timestamp.FromTime(ts) appErrs = appendErrors{} sampleLimitErr error bucketLimitErr error @@ -1558,7 +1611,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, metadataChanged bool ) - exemplars := make([]exemplar.Exemplar, 1) + exemplars := make([]exemplar.Exemplar, 0, 1) // updateMetadata updates the current iteration's metadata object and the // metadataChanged value if we have metadata in the scrape cache AND the @@ -1589,9 +1642,8 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, if err != nil { return } - // Only perform cache cleaning if the scrape was not empty. - // An empty scrape (usually) is used to indicate a failed scrape. - sl.cache.iterDone(len(b) > 0) + // Flush and swap the cache as the scrape was non-empty. + sl.cache.iterDone(true) }() loop: @@ -1700,11 +1752,19 @@ loop: } else { if sl.enableCTZeroIngestion { if ctMs := p.CreatedTimestamp(); ctMs != nil { - ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs) + if isHistogram && sl.enableNativeHistogramIngestion { + if h != nil { + ref, err = app.AppendHistogramCTZeroSample(ref, lset, t, *ctMs, h, nil) + } else { + ref, err = app.AppendHistogramCTZeroSample(ref, lset, t, *ctMs, nil, fh) + } + } else { + ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs) + } if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now. // CT is an experimental feature. For now, we don't need to fail the // scrape on errors updating the created timestamp, log debug. - level.Debug(sl.l).Log("msg", "Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err) + sl.l.Debug("Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err) } } } @@ -1729,7 +1789,7 @@ loop: sampleAdded, err = sl.checkAddError(met, err, &sampleLimitErr, &bucketLimitErr, &appErrs) if err != nil { if !errors.Is(err, storage.ErrNotFound) { - level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err) + sl.l.Debug("Unexpected error", "series", string(met), "err", err) } break loop } @@ -1781,21 +1841,21 @@ loop: outOfOrderExemplars++ default: // Since exemplar storage is still experimental, we don't fail the scrape on ingestion errors. 
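// Aside (illustrative, not part of this diff): the parser selection introduced in this hunk,
// condensed into one helper. The helper name and parameter list are made up for the sketch;
// the two constructor calls use the argument order visible above, and the scrape package's
// imports (textparse, labels) are assumed.
func newScrapeParser(b []byte, contentType, fallback string, alwaysClassic, ctZero, toNHCB bool, st *labels.SymbolTable) (textparse.Parser, error) {
	p, err := textparse.New(b, contentType, fallback, alwaysClassic, ctZero, st)
	if p == nil {
		// Neither the Content-Type nor the configured fallback identified the exposition format.
		return nil, err
	}
	if toNHCB {
		// Wrap the parser so classic histograms come out as native histograms
		// with custom buckets (NHCB) as they are parsed.
		p = textparse.NewNHCBParser(p, st, alwaysClassic)
	}
	// A non-nil p together with a non-nil err means the Content-Type was invalid and the
	// fallback was used; the scrape loop above only logs that case at debug level.
	return p, err
}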
- level.Debug(sl.l).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr) + sl.l.Debug("Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr) } } if outOfOrderExemplars > 0 && outOfOrderExemplars == len(exemplars) { // Only report out of order exemplars if all are out of order, otherwise this was a partial update // to some existing set of exemplars. appErrs.numExemplarOutOfOrder += outOfOrderExemplars - level.Debug(sl.l).Log("msg", "Out of order exemplars", "count", outOfOrderExemplars, "latest", fmt.Sprintf("%+v", exemplars[len(exemplars)-1])) + sl.l.Debug("Out of order exemplars", "count", outOfOrderExemplars, "latest", fmt.Sprintf("%+v", exemplars[len(exemplars)-1])) sl.metrics.targetScrapeExemplarOutOfOrder.Add(float64(outOfOrderExemplars)) } if sl.appendMetadataToWAL && metadataChanged { if _, merr := app.UpdateMetadata(ref, lset, meta); merr != nil { // No need to fail the scrape on errors appending metadata. - level.Debug(sl.l).Log("msg", "Error when appending metadata in scrape loop", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", meta), "err", merr) + sl.l.Debug("Error when appending metadata in scrape loop", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", meta), "err", merr) } } } @@ -1814,29 +1874,19 @@ loop: sl.metrics.targetScrapeNativeHistogramBucketLimit.Inc() } if appErrs.numOutOfOrder > 0 { - level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order samples", "num_dropped", appErrs.numOutOfOrder) + sl.l.Warn("Error on ingesting out-of-order samples", "num_dropped", appErrs.numOutOfOrder) } if appErrs.numDuplicates > 0 { - level.Warn(sl.l).Log("msg", "Error on ingesting samples with different value but same timestamp", "num_dropped", appErrs.numDuplicates) + sl.l.Warn("Error on ingesting samples with different value but same timestamp", "num_dropped", appErrs.numDuplicates) } if appErrs.numOutOfBounds > 0 { - level.Warn(sl.l).Log("msg", "Error on ingesting samples that are too old or are too far into the future", "num_dropped", appErrs.numOutOfBounds) + sl.l.Warn("Error on ingesting samples that are too old or are too far into the future", "num_dropped", appErrs.numOutOfBounds) } if appErrs.numExemplarOutOfOrder > 0 { - level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", appErrs.numExemplarOutOfOrder) + sl.l.Warn("Error on ingesting out-of-order exemplars", "num_dropped", appErrs.numExemplarOutOfOrder) } if err == nil { - sl.cache.forEachStale(func(lset labels.Labels) bool { - // Series no longer exposed, mark it stale. - _, err = app.Append(0, lset, defTime, math.Float64frombits(value.StaleNaN)) - switch { - case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp): - // Do not count these in logging, as this is expected if a target - // goes away and comes back again with a new scrape loop. 
- err = nil - } - return err == nil - }) + err = sl.updateStaleMarkers(app, defTime) } return } @@ -1851,17 +1901,17 @@ func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucke return false, storage.ErrNotFound case errors.Is(err, storage.ErrOutOfOrderSample): appErrs.numOutOfOrder++ - level.Debug(sl.l).Log("msg", "Out of order sample", "series", string(met)) + sl.l.Debug("Out of order sample", "series", string(met)) sl.metrics.targetScrapeSampleOutOfOrder.Inc() return false, nil case errors.Is(err, storage.ErrDuplicateSampleForTimestamp): appErrs.numDuplicates++ - level.Debug(sl.l).Log("msg", "Duplicate sample for timestamp", "series", string(met)) + sl.l.Debug("Duplicate sample for timestamp", "series", string(met)) sl.metrics.targetScrapeSampleDuplicate.Inc() return false, nil case errors.Is(err, storage.ErrOutOfBounds): appErrs.numOutOfBounds++ - level.Debug(sl.l).Log("msg", "Out of bounds metric", "series", string(met)) + sl.l.Debug("Out of bounds metric", "series", string(met)) sl.metrics.targetScrapeSampleOutOfBounds.Inc() return false, nil case errors.Is(err, errSampleLimit): @@ -1934,7 +1984,7 @@ func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration tim func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err error) { ts := timestamp.FromTime(start) - + app.SetOptions(&storage.AppendOptions{DiscardOutOfOrder: true}) stale := math.Float64frombits(value.StaleNaN) b := labels.NewBuilder(labels.EmptyLabels()) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index a69a19d7f77..9ec1980677b 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -29,16 +29,18 @@ import ( "strings" "sync" "testing" + "text/template" "time" - "github.com/go-kit/log" "github.com/gogo/protobuf/proto" "github.com/google/go-cmp/cmp" + "github.com/grafana/regexp" "github.com/prometheus/client_golang/prometheus" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" dto "github.com/prometheus/client_model/go" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/config" @@ -53,6 +55,7 @@ import ( "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/util/logging" "github.com/prometheus/prometheus/util/pool" "github.com/prometheus/prometheus/util/teststorage" "github.com/prometheus/prometheus/util/testutil" @@ -83,6 +86,97 @@ func TestNewScrapePool(t *testing.T) { require.NotNil(t, sp.newLoop, "newLoop function not initialized.") } +func TestStorageHandlesOutOfOrderTimestamps(t *testing.T) { + // Test with default OutOfOrderTimeWindow (0) + t.Run("Out-Of-Order Sample Disabled", func(t *testing.T) { + s := teststorage.New(t) + defer s.Close() + + runScrapeLoopTest(t, s, false) + }) + + // Test with specific OutOfOrderTimeWindow (600000) + t.Run("Out-Of-Order Sample Enabled", func(t *testing.T) { + s := teststorage.New(t, 600000) + defer s.Close() + + runScrapeLoopTest(t, s, true) + }) +} + +func runScrapeLoopTest(t *testing.T, s *teststorage.TestStorage, expectOutOfOrder bool) { + // Create an appender for adding samples to the storage. 
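// Aside (illustrative): the arithmetic behind the two sub-tests above. teststorage.New(t, 600000)
// enables an out-of-order time window of 600000 ms, and the deliberately out-of-order sample
// written below lags the newest in-order sample by 5 minutes, which is inside that window.
const (
	outOfOrderTimeWindowMs = 600_000 // as passed to teststorage.New above (10 minutes)
	outOfOrderSampleLagMs  = 300_000 // the now.Add(-5*time.Minute) sample below
)
// Since outOfOrderSampleLagMs < outOfOrderTimeWindowMs, the sample is accepted when the window
// is enabled and rejected when the window is left at its default of 0.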
+ app := s.Appender(context.Background()) + capp := &collectResultAppender{next: app} + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return capp }, 0) + + // Current time for generating timestamps. + now := time.Now() + + // Calculate timestamps for the samples based on the current time. + now = now.Truncate(time.Minute) // round down the now timestamp to the nearest minute + timestampInorder1 := now + timestampOutOfOrder := now.Add(-5 * time.Minute) + timestampInorder2 := now.Add(5 * time.Minute) + + slApp := sl.appender(context.Background()) + _, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1`), "text/plain", timestampInorder1) + require.NoError(t, err) + + _, _, _, err = sl.append(slApp, []byte(`metric_a{a="1",b="1"} 2`), "text/plain", timestampOutOfOrder) + require.NoError(t, err) + + _, _, _, err = sl.append(slApp, []byte(`metric_a{a="1",b="1"} 3`), "text/plain", timestampInorder2) + require.NoError(t, err) + + require.NoError(t, slApp.Commit()) + + // Query the samples back from the storage. + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + q, err := s.Querier(time.Time{}.UnixNano(), time.Now().UnixNano()) + require.NoError(t, err) + defer q.Close() + + // Use a matcher to filter the metric name. + series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "metric_a")) + + var results []floatSample + for series.Next() { + it := series.At().Iterator(nil) + for it.Next() == chunkenc.ValFloat { + t, v := it.At() + results = append(results, floatSample{ + metric: series.At().Labels(), + t: t, + f: v, + }) + } + require.NoError(t, it.Err()) + } + require.NoError(t, series.Err()) + + // Define the expected results + want := []floatSample{ + { + metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"), + t: timestamp.FromTime(timestampInorder1), + f: 1, + }, + { + metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"), + t: timestamp.FromTime(timestampInorder2), + f: 3, + }, + } + + if expectOutOfOrder { + require.NotEqual(t, want, results, "Expected results to include out-of-order sample:\n%s", results) + } else { + require.Equal(t, want, results, "Appended samples not as expected:\n%s", results) + } +} + func TestDroppedTargetsList(t *testing.T) { var ( app = &nopAppendable{} @@ -158,7 +252,7 @@ type testLoop struct { timeout time.Duration } -func (l *testLoop) setScrapeFailureLogger(log.Logger) { +func (l *testLoop) setScrapeFailureLogger(*logging.JSONFileLogger) { } func (l *testLoop) run(errc chan<- error) { @@ -395,7 +489,7 @@ func TestScrapePoolTargetLimit(t *testing.T) { activeTargets: map[uint64]*Target{}, loops: map[uint64]loop{}, newLoop: newLoop, - logger: log.NewNopLogger(), + logger: promslog.NewNopLogger(), client: http.DefaultClient, metrics: newTestScrapeMetrics(t), symbolTable: labels.NewSymbolTable(), @@ -440,7 +534,7 @@ func TestScrapePoolTargetLimit(t *testing.T) { lerr := l.(*testLoop).getForcedError() if shouldErr { require.Error(t, lerr, "error was expected for %d targets with a limit of %d", targets, limit) - require.Equal(t, fmt.Sprintf("target_limit exceeded (number of targets: %d, limit: %d)", targets, limit), lerr.Error()) + require.EqualError(t, lerr, fmt.Sprintf("target_limit exceeded (number of targets: %d, limit: %d)", targets, limit)) } else { require.NoError(t, lerr) } @@ -662,6 +756,10 @@ func TestScrapePoolScrapeLoopsStarted(t *testing.T) { } func newBasicScrapeLoop(t testing.TB, ctx 
context.Context, scraper scraper, app func(ctx context.Context) storage.Appender, interval time.Duration) *scrapeLoop { + return newBasicScrapeLoopWithFallback(t, ctx, scraper, app, interval, "") +} + +func newBasicScrapeLoopWithFallback(t testing.TB, ctx context.Context, scraper scraper, app func(ctx context.Context) storage.Appender, interval time.Duration, fallback string) *scrapeLoop { return newScrapeLoop(ctx, scraper, nil, nil, @@ -683,11 +781,13 @@ func newBasicScrapeLoop(t testing.TB, ctx context.Context, scraper scraper, app false, false, false, + false, nil, false, newTestScrapeMetrics(t), false, model.LegacyValidation, + fallback, ) } @@ -748,7 +848,8 @@ func TestScrapeLoopStop(t *testing.T) { app = func(ctx context.Context) storage.Appender { return appender } ) - sl := newBasicScrapeLoop(t, context.Background(), scraper, app, 10*time.Millisecond) + // Since we're writing samples directly below we need to provide a protocol fallback. + sl := newBasicScrapeLoopWithFallback(t, context.Background(), scraper, app, 10*time.Millisecond, "text/plain") // Terminate loop after 2 scrapes. numScrapes := 0 @@ -826,11 +927,13 @@ func TestScrapeLoopRun(t *testing.T) { false, false, false, + false, nil, false, scrapeMetrics, false, model.LegacyValidation, + "", ) // The loop must terminate during the initial offset if the context @@ -971,11 +1074,13 @@ func TestScrapeLoopMetadata(t *testing.T) { false, false, false, + false, nil, false, scrapeMetrics, false, model.LegacyValidation, + "", ) defer cancel() @@ -1026,7 +1131,7 @@ func TestScrapeLoopSeriesAdded(t *testing.T) { ctx, sl := simpleTestScrapeLoop(t) slApp := sl.appender(ctx) - total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\n"), "", time.Time{}) + total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\n"), "text/plain", time.Time{}) require.NoError(t, err) require.NoError(t, slApp.Commit()) require.Equal(t, 1, total) @@ -1034,7 +1139,7 @@ func TestScrapeLoopSeriesAdded(t *testing.T) { require.Equal(t, 1, seriesAdded) slApp = sl.appender(ctx) - total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1\n"), "", time.Time{}) + total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1\n"), "text/plain", time.Time{}) require.NoError(t, slApp.Commit()) require.NoError(t, err) require.Equal(t, 1, total) @@ -1043,6 +1148,7 @@ func TestScrapeLoopSeriesAdded(t *testing.T) { } func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) { + model.NameValidationScheme = model.LegacyValidation s := teststorage.New(t) defer s.Close() ctx, cancel := context.WithCancel(context.Background()) @@ -1063,7 +1169,7 @@ func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) { } slApp := sl.appender(ctx) - total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\n"), "", time.Time{}) + total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\n"), "text/plain", time.Time{}) require.ErrorContains(t, err, "invalid metric name or label names") require.NoError(t, slApp.Rollback()) require.Equal(t, 1, total) @@ -1087,7 +1193,7 @@ func TestScrapeLoopFailLegacyUnderUTF8(t *testing.T) { sl.validationScheme = model.LegacyValidation slApp := sl.appender(ctx) - total, added, seriesAdded, err := sl.append(slApp, []byte("{\"test.metric\"} 1\n"), "", time.Time{}) + total, added, seriesAdded, err := sl.append(slApp, []byte("{\"test.metric\"} 1\n"), "text/plain", time.Time{}) require.ErrorContains(t, err, "invalid metric name or label names") 
require.NoError(t, slApp.Rollback()) require.Equal(t, 1, total) @@ -1098,7 +1204,7 @@ func TestScrapeLoopFailLegacyUnderUTF8(t *testing.T) { sl.validationScheme = model.UTF8Validation slApp = sl.appender(ctx) - total, added, seriesAdded, err = sl.append(slApp, []byte("{\"test.metric\"} 1\n"), "", time.Time{}) + total, added, seriesAdded, err = sl.append(slApp, []byte("{\"test.metric\"} 1\n"), "text/plain", time.Time{}) require.NoError(t, err) require.Equal(t, 1, total) require.Equal(t, 1, added) @@ -1128,7 +1234,7 @@ func BenchmarkScrapeLoopAppend(b *testing.B) { for i := 0; i < b.N; i++ { ts = ts.Add(time.Second) - _, _, _, _ = sl.append(slApp, metrics, "", ts) + _, _, _, _ = sl.append(slApp, metrics, "text/plain", ts) } } @@ -1147,6 +1253,87 @@ func BenchmarkScrapeLoopAppendOM(b *testing.B) { } } +func TestSetOptionsHandlingStaleness(t *testing.T) { + s := teststorage.New(t, 600000) + defer s.Close() + + signal := make(chan struct{}, 1) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Function to run the scrape loop + runScrapeLoop := func(ctx context.Context, t *testing.T, cue int, action func(*scrapeLoop)) { + var ( + scraper = &testScraper{} + app = func(ctx context.Context) storage.Appender { + return s.Appender(ctx) + } + ) + sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) + numScrapes := 0 + scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { + numScrapes++ + if numScrapes == cue { + action(sl) + } + w.Write([]byte(fmt.Sprintf("metric_a{a=\"1\",b=\"1\"} %d\n", 42+numScrapes))) + return nil + } + sl.run(nil) + } + go func() { + runScrapeLoop(ctx, t, 2, func(sl *scrapeLoop) { + go sl.stop() + // Wait a bit then start a new target. + time.Sleep(100 * time.Millisecond) + go func() { + runScrapeLoop(ctx, t, 4, func(_ *scrapeLoop) { + cancel() + }) + signal <- struct{}{} + }() + }) + }() + + select { + case <-signal: + case <-time.After(10 * time.Second): + t.Fatalf("Scrape wasn't stopped.") + } + + ctx1, cancel := context.WithCancel(context.Background()) + defer cancel() + + q, err := s.Querier(0, time.Now().UnixNano()) + + require.NoError(t, err) + defer q.Close() + + series := q.Select(ctx1, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "metric_a")) + + var results []floatSample + for series.Next() { + it := series.At().Iterator(nil) + for it.Next() == chunkenc.ValFloat { + t, v := it.At() + results = append(results, floatSample{ + metric: series.At().Labels(), + t: t, + f: v, + }) + } + require.NoError(t, it.Err()) + } + require.NoError(t, series.Err()) + var c int + for _, s := range results { + if value.IsStaleNaN(s.f) { + c++ + } + } + require.Equal(t, 0, c, "invalid count of staleness markers after stopping the engine") +} + func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) { appender := &collectResultAppender{} var ( @@ -1156,7 +1343,8 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) { ) ctx, cancel := context.WithCancel(context.Background()) - sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) + // Since we're writing samples directly below we need to provide a protocol fallback. + sl := newBasicScrapeLoopWithFallback(t, ctx, scraper, app, 10*time.Millisecond, "text/plain") // Succeed once, several failures, then stop. 
numScrapes := 0 @@ -1202,7 +1390,8 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) { ) ctx, cancel := context.WithCancel(context.Background()) - sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) + // Since we're writing samples directly below we need to provide a protocol fallback. + sl := newBasicScrapeLoopWithFallback(t, ctx, scraper, app, 10*time.Millisecond, "text/plain") // Succeed once, several failures, then stop. scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { @@ -1253,7 +1442,8 @@ func TestScrapeLoopCache(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) // Decreasing the scrape interval could make the test fail, as multiple scrapes might be initiated at identical millisecond timestamps. // See https://github.com/prometheus/prometheus/issues/12727. - sl := newBasicScrapeLoop(t, ctx, scraper, app, 100*time.Millisecond) + // Since we're writing samples directly below we need to provide a protocol fallback. + sl := newBasicScrapeLoopWithFallback(t, ctx, scraper, app, 100*time.Millisecond, "text/plain") numScrapes := 0 @@ -1418,7 +1608,7 @@ func TestScrapeLoopAppend(t *testing.T) { now := time.Now() slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte(test.scrapeLabels), "", now) + _, _, _, err := sl.append(slApp, []byte(test.scrapeLabels), "text/plain", now) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -1499,7 +1689,7 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) { return mutateSampleLabels(l, &Target{labels: labels.FromStrings(tc.targetLabels...)}, false, nil) } slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte(tc.exposedLabels), "", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC)) + _, _, _, err := sl.append(slApp, []byte(tc.exposedLabels), "text/plain", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC)) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -1523,7 +1713,8 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) { fakeRef := storage.SeriesRef(1) expValue := float64(1) metric := []byte(`metric{n="1"} 1`) - p, warning := textparse.New(metric, "", false, labels.NewSymbolTable()) + p, warning := textparse.New(metric, "text/plain", "", false, false, labels.NewSymbolTable()) + require.NotNil(t, p) require.NoError(t, warning) var lset labels.Labels @@ -1536,7 +1727,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) { now := time.Now() slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, metric, "", now) + _, _, _, err := sl.append(slApp, metric, "text/plain", now) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -1573,7 +1764,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) { now := time.Now() slApp := sl.appender(context.Background()) - total, added, seriesAdded, err := sl.append(app, []byte("metric_a 1\nmetric_b 1\nmetric_c 1\n"), "", now) + total, added, seriesAdded, err := sl.append(app, []byte("metric_a 1\nmetric_b 1\nmetric_c 1\n"), "text/plain", now) require.ErrorIs(t, err, errSampleLimit) require.NoError(t, slApp.Rollback()) require.Equal(t, 3, total) @@ -1602,7 +1793,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) { now = time.Now() slApp = sl.appender(context.Background()) - total, added, seriesAdded, err = sl.append(slApp, []byte("metric_a 1\nmetric_b 1\nmetric_c{deleteme=\"yes\"} 1\nmetric_d 1\nmetric_e 1\nmetric_f 1\nmetric_g 1\nmetric_h{deleteme=\"yes\"} 1\nmetric_i{deleteme=\"yes\"} 1\n"), 
"", now) + total, added, seriesAdded, err = sl.append(slApp, []byte("metric_a 1\nmetric_b 1\nmetric_c{deleteme=\"yes\"} 1\nmetric_d 1\nmetric_e 1\nmetric_f 1\nmetric_g 1\nmetric_h{deleteme=\"yes\"} 1\nmetric_i{deleteme=\"yes\"} 1\n"), "text/plain", now) require.ErrorIs(t, err, errSampleLimit) require.NoError(t, slApp.Rollback()) require.Equal(t, 9, total) @@ -1730,12 +1921,12 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) { now := time.Now() slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1`), "", now) + _, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1`), "text/plain", now) require.NoError(t, err) require.NoError(t, slApp.Commit()) slApp = sl.appender(context.Background()) - _, _, _, err = sl.append(slApp, []byte(`metric_a{b="1",a="1"} 2`), "", now.Add(time.Minute)) + _, _, _, err = sl.append(slApp, []byte(`metric_a{b="1",a="1"} 2`), "text/plain", now.Add(time.Minute)) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -1754,6 +1945,33 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) { require.Equal(t, want, capp.resultFloats, "Appended samples not as expected:\n%s", appender) } +func TestScrapeLoopAppendFailsWithNoContentType(t *testing.T) { + app := &collectResultAppender{} + + // Explicitly setting the lack of fallback protocol here to make it obvious. + sl := newBasicScrapeLoopWithFallback(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0, "") + + now := time.Now() + slApp := sl.appender(context.Background()) + _, _, _, err := sl.append(slApp, []byte("metric_a 1\n"), "", now) + // We expect the appropriate error. + require.ErrorContains(t, err, "non-compliant scrape target sending blank Content-Type and no fallback_scrape_protocol specified for target", "Expected \"non-compliant scrape\" error but got: %s", err) +} + +func TestScrapeLoopAppendEmptyWithNoContentType(t *testing.T) { + // This test ensures we there are no errors when we get a blank scrape or just want to append a stale marker. + app := &collectResultAppender{} + + // Explicitly setting the lack of fallback protocol here to make it obvious. 
+ sl := newBasicScrapeLoopWithFallback(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0, "") + + now := time.Now() + slApp := sl.appender(context.Background()) + _, _, _, err := sl.append(slApp, []byte(""), "", now) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) +} + func TestScrapeLoopAppendStaleness(t *testing.T) { app := &collectResultAppender{} @@ -1761,7 +1979,7 @@ func TestScrapeLoopAppendStaleness(t *testing.T) { now := time.Now() slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte("metric_a 1\n"), "", now) + _, _, _, err := sl.append(slApp, []byte("metric_a 1\n"), "text/plain", now) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -1790,7 +2008,7 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) { sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) now := time.Now() slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte("metric_a 1 1000\n"), "", now) + _, _, _, err := sl.append(slApp, []byte("metric_a 1 1000\n"), "text/plain", now) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -1816,7 +2034,7 @@ func TestScrapeLoopAppendStalenessIfTrackTimestampStaleness(t *testing.T) { now := time.Now() slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte("metric_a 1 1000\n"), "", now) + _, _, _, err := sl.append(slApp, []byte("metric_a 1 1000\n"), "text/plain", now) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -1843,7 +2061,7 @@ func TestScrapeLoopAppendStalenessIfTrackTimestampStaleness(t *testing.T) { func TestScrapeLoopAppendExemplar(t *testing.T) { tests := []struct { title string - scrapeClassicHistograms bool + alwaysScrapeClassicHist bool enableNativeHistogramsIngestion bool scrapeText string contentType string @@ -1997,7 +2215,8 @@ metric: < `, contentType: "application/vnd.google.protobuf", histograms: []histogramSample{{ - t: 1234568, + t: 1234568, + metric: labels.FromStrings("__name__", "test_histogram"), h: &histogram.Histogram{ Count: 175, ZeroCount: 2, @@ -2111,7 +2330,7 @@ metric: < > `, - scrapeClassicHistograms: true, + alwaysScrapeClassicHist: true, contentType: "application/vnd.google.protobuf", floats: []floatSample{ {metric: labels.FromStrings("__name__", "test_histogram_count"), t: 1234568, f: 175}, @@ -2123,7 +2342,8 @@ metric: < {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "+Inf"), t: 1234568, f: 175}, }, histograms: []histogramSample{{ - t: 1234568, + t: 1234568, + metric: labels.FromStrings("__name__", "test_histogram"), h: &histogram.Histogram{ Count: 175, ZeroCount: 2, @@ -2172,7 +2392,7 @@ metric: < sl.reportSampleMutator = func(l labels.Labels) labels.Labels { return mutateReportSampleLabels(l, discoveryLabels) } - sl.scrapeClassicHistograms = test.scrapeClassicHistograms + sl.alwaysScrapeClassicHist = test.alwaysScrapeClassicHist now := time.Now() @@ -2334,7 +2554,7 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T now := time.Unix(1, 0) slApp := sl.appender(context.Background()) - total, added, seriesAdded, err := sl.append(slApp, []byte("out_of_order 1\namend 1\nnormal 1\nout_of_bounds 1\n"), "", now) + total, added, seriesAdded, err := sl.append(slApp, []byte("out_of_order 1\namend 1\nnormal 1\nout_of_bounds 1\n"), "text/plain", now) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -2365,7 +2585,7 @@ func 
TestScrapeLoopOutOfBoundsTimeError(t *testing.T) { now := time.Now().Add(20 * time.Minute) slApp := sl.appender(context.Background()) - total, added, seriesAdded, err := sl.append(slApp, []byte("normal 1\n"), "", now) + total, added, seriesAdded, err := sl.append(slApp, []byte("normal 1\n"), "text/plain", now) require.NoError(t, err) require.NoError(t, slApp.Commit()) require.Equal(t, 1, total) @@ -2379,8 +2599,11 @@ func TestTargetScraperScrapeOK(t *testing.T) { expectedTimeout = "1.5" ) - var protobufParsing bool - var allowUTF8 bool + var ( + protobufParsing bool + allowUTF8 bool + qValuePattern = regexp.MustCompile(`q=([0-9]+(\.\d+)?)`) + ) server := httptest.NewServer( http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -2393,6 +2616,17 @@ func TestTargetScraperScrapeOK(t *testing.T) { "Expected Accept header to prefer application/vnd.google.protobuf.") } + contentTypes := strings.Split(accept, ",") + for _, ct := range contentTypes { + match := qValuePattern.FindStringSubmatch(ct) + require.Len(t, match, 3) + qValue, err := strconv.ParseFloat(match[1], 64) + require.NoError(t, err, "Error parsing q value") + require.GreaterOrEqual(t, qValue, float64(0)) + require.LessOrEqual(t, qValue, float64(1)) + require.LessOrEqual(t, len(strings.Split(match[1], ".")[1]), 3, "q value should have at most 3 decimal places") + } + timeout := r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds") require.Equal(t, expectedTimeout, timeout, "Expected scrape timeout header.") @@ -2531,7 +2765,7 @@ func TestTargetScrapeScrapeNotFound(t *testing.T) { resp, err := ts.scrape(context.Background()) require.NoError(t, err) _, err = ts.readResponse(context.Background(), resp, io.Discard) - require.Contains(t, err.Error(), "404", "Expected \"404 NotFound\" error but got: %s", err) + require.ErrorContains(t, err, "404", "Expected \"404 NotFound\" error but got: %s", err) } func TestTargetScraperBodySizeLimit(t *testing.T) { @@ -2651,7 +2885,7 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) { now := time.Now() slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1 0`), "", now) + _, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1 0`), "text/plain", now) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -2678,7 +2912,7 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) { now := time.Now() slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1 0`), "", now) + _, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1 0`), "text/plain", now) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -2702,7 +2936,7 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) { // We add a good and a bad metric to check that both are discarded. slApp := sl.appender(ctx) - _, _, _, err := sl.append(slApp, []byte("test_metric{le=\"500\"} 1\ntest_metric{le=\"600\",le=\"700\"} 1\n"), "", time.Time{}) + _, _, _, err := sl.append(slApp, []byte("test_metric{le=\"500\"} 1\ntest_metric{le=\"600\",le=\"700\"} 1\n"), "text/plain", time.Time{}) require.Error(t, err) require.NoError(t, slApp.Rollback()) // We need to cycle staleness cache maps after a manual rollback. Otherwise they will have old entries in them, @@ -2717,7 +2951,7 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) { // We add a good metric to check that it is recorded. 
slApp = sl.appender(ctx) - _, _, _, err = sl.append(slApp, []byte("test_metric{le=\"500\"} 1\n"), "", time.Time{}) + _, _, _, err = sl.append(slApp, []byte("test_metric{le=\"500\"} 1\n"), "text/plain", time.Time{}) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -2746,7 +2980,7 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) { defer cancel() slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte("nok 1\nnok2{drop=\"drop\"} 1\n"), "", time.Time{}) + _, _, _, err := sl.append(slApp, []byte("nok 1\nnok2{drop=\"drop\"} 1\n"), "text/plain", time.Time{}) require.Error(t, err) require.NoError(t, slApp.Rollback()) require.Equal(t, errNameLabelMandatory, err) @@ -2992,7 +3226,7 @@ func TestScrapeAddFast(t *testing.T) { defer cancel() slApp := sl.appender(ctx) - _, _, _, err := sl.append(slApp, []byte("up 1\n"), "", time.Time{}) + _, _, _, err := sl.append(slApp, []byte("up 1\n"), "text/plain", time.Time{}) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -3003,7 +3237,7 @@ func TestScrapeAddFast(t *testing.T) { } slApp = sl.appender(ctx) - _, _, _, err = sl.append(slApp, []byte("up 1\n"), "", time.Time{}.Add(time.Second)) + _, _, _, err = sl.append(slApp, []byte("up 1\n"), "text/plain", time.Time{}.Add(time.Second)) require.NoError(t, err) require.NoError(t, slApp.Commit()) } @@ -3043,7 +3277,7 @@ func TestReuseCacheRace(t *testing.T) { func TestCheckAddError(t *testing.T) { var appErrs appendErrors - sl := scrapeLoop{l: log.NewNopLogger(), metrics: newTestScrapeMetrics(t)} + sl := scrapeLoop{l: promslog.NewNopLogger(), metrics: newTestScrapeMetrics(t)} sl.checkAddError(nil, storage.ErrOutOfOrderSample, nil, nil, &appErrs) require.Equal(t, 1, appErrs.numOutOfOrder) } @@ -3058,7 +3292,8 @@ func TestScrapeReportSingleAppender(t *testing.T) { ) ctx, cancel := context.WithCancel(context.Background()) - sl := newBasicScrapeLoop(t, ctx, scraper, s.Appender, 10*time.Millisecond) + // Since we're writing samples directly below we need to provide a protocol fallback. 
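// Aside (illustrative, not shown in this diff): in a real scrape configuration the fallback that
// these tests pass programmatically corresponds to the fallback_scrape_protocol setting named in
// the error message above. The Go field and constant names below are assumptions made for the
// sketch, not something this diff confirms.
cfg := &config.ScrapeConfig{
	JobName: "test",
	// Used when a target replies with a blank or unparsable Content-Type.
	ScrapeFallbackProtocol: config.PrometheusText0_0_4, // assumed field/constant names
}
_ = cfg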
+ sl := newBasicScrapeLoopWithFallback(t, ctx, scraper, s.Appender, 10*time.Millisecond, "text/plain") numScrapes := 0 @@ -3114,18 +3349,7 @@ func TestScrapeReportLimit(t *testing.T) { ScrapeTimeout: model.Duration(100 * time.Millisecond), } - var ( - scrapes int - scrapedTwice = make(chan bool) - ) - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, "metric_a 44\nmetric_b 44\nmetric_c 44\nmetric_d 44\n") - scrapes++ - if scrapes == 2 { - close(scrapedTwice) - } - })) + ts, scrapedTwice := newScrapableServer("metric_a 44\nmetric_b 44\nmetric_c 44\nmetric_d 44\n") defer ts.Close() sp, err := newScrapePool(cfg, s, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) @@ -3168,6 +3392,52 @@ func TestScrapeReportLimit(t *testing.T) { require.True(t, found) } +func TestScrapeUTF8(t *testing.T) { + s := teststorage.New(t) + defer s.Close() + model.NameValidationScheme = model.UTF8Validation + t.Cleanup(func() { model.NameValidationScheme = model.LegacyValidation }) + + cfg := &config.ScrapeConfig{ + JobName: "test", + Scheme: "http", + ScrapeInterval: model.Duration(100 * time.Millisecond), + ScrapeTimeout: model.Duration(100 * time.Millisecond), + MetricNameValidationScheme: config.UTF8ValidationConfig, + } + ts, scrapedTwice := newScrapableServer("{\"with.dots\"} 42\n") + defer ts.Close() + + sp, err := newScrapePool(cfg, s, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) + require.NoError(t, err) + defer sp.stop() + + testURL, err := url.Parse(ts.URL) + require.NoError(t, err) + sp.Sync([]*targetgroup.Group{ + { + Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(testURL.Host)}}, + }, + }) + + select { + case <-time.After(5 * time.Second): + t.Fatalf("target was not scraped twice") + case <-scrapedTwice: + // If the target has been scraped twice, report samples from the first + // scrape have been inserted in the database. + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + q, err := s.Querier(time.Time{}.UnixNano(), time.Now().UnixNano()) + require.NoError(t, err) + defer q.Close() + series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "with.dots")) + + require.True(t, series.Next(), "series not found in tsdb") +} + func TestScrapeLoopLabelLimit(t *testing.T) { tests := []struct { title string @@ -3250,7 +3520,7 @@ func TestScrapeLoopLabelLimit(t *testing.T) { sl.labelLimits = &test.labelLimits slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte(test.scrapeLabels), "", time.Now()) + _, _, _, err := sl.append(slApp, []byte(test.scrapeLabels), "text/plain", time.Now()) t.Logf("Test:%s", test.title) if test.expectErr { @@ -3364,16 +3634,7 @@ test_summary_count 199 // The expected "quantile" values do not have the trailing ".0". 
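// Aside (illustrative): why the expected "quantile" values below have no trailing ".0". Formatting
// the label value as a float with the shortest exact representation yields the canonical form the
// test expects; this is a sketch of that formatting, not the code Prometheus uses internally.
for _, q := range []float64{0.5, 0.9, 0.95, 0.99, 1} {
	fmt.Println(strconv.FormatFloat(q, 'f', -1, 64)) // prints 0.5, 0.9, 0.95, 0.99, 1
}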
expectedQuantileValues := []string{"0.5", "0.9", "0.95", "0.99", "1"} - scrapeCount := 0 - scraped := make(chan bool) - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, metricsText) - scrapeCount++ - if scrapeCount > 2 { - close(scraped) - } - })) + ts, scrapedTwice := newScrapableServer(metricsText) defer ts.Close() sp, err := newScrapePool(config, simpleStorage, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) @@ -3392,7 +3653,7 @@ test_summary_count 199 select { case <-time.After(5 * time.Second): t.Fatalf("target was not scraped") - case <-scraped: + case <-scrapedTwice: } ctx, cancel := context.WithCancel(context.Background()) @@ -3424,6 +3685,524 @@ test_summary_count 199 checkValues("quantile", expectedQuantileValues, series) } +// Testing whether we can automatically convert scraped classic histograms into native histograms with custom buckets. +func TestConvertClassicHistogramsToNHCB(t *testing.T) { + genTestCounterText := func(name string, value int, withMetadata bool) string { + if withMetadata { + return fmt.Sprintf(` +# HELP %s some help text +# TYPE %s counter +%s{address="0.0.0.0",port="5001"} %d +`, name, name, name, value) + } + return fmt.Sprintf(` +%s %d +`, name, value) + } + genTestHistText := func(name string, withMetadata bool) string { + data := map[string]interface{}{ + "name": name, + } + b := &bytes.Buffer{} + if withMetadata { + template.Must(template.New("").Parse(` +# HELP {{.name}} This is a histogram with default buckets +# TYPE {{.name}} histogram +`)).Execute(b, data) + } + template.Must(template.New("").Parse(` +{{.name}}_bucket{address="0.0.0.0",port="5001",le="0.005"} 0 +{{.name}}_bucket{address="0.0.0.0",port="5001",le="0.01"} 0 +{{.name}}_bucket{address="0.0.0.0",port="5001",le="0.025"} 0 +{{.name}}_bucket{address="0.0.0.0",port="5001",le="0.05"} 0 +{{.name}}_bucket{address="0.0.0.0",port="5001",le="0.1"} 0 +{{.name}}_bucket{address="0.0.0.0",port="5001",le="0.25"} 0 +{{.name}}_bucket{address="0.0.0.0",port="5001",le="0.5"} 0 +{{.name}}_bucket{address="0.0.0.0",port="5001",le="1"} 0 +{{.name}}_bucket{address="0.0.0.0",port="5001",le="2.5"} 0 +{{.name}}_bucket{address="0.0.0.0",port="5001",le="5"} 0 +{{.name}}_bucket{address="0.0.0.0",port="5001",le="10"} 1 +{{.name}}_bucket{address="0.0.0.0",port="5001",le="+Inf"} 1 +{{.name}}_sum{address="0.0.0.0",port="5001"} 10 +{{.name}}_count{address="0.0.0.0",port="5001"} 1 +`)).Execute(b, data) + return b.String() + } + genTestCounterProto := func(name string, value int) string { + return fmt.Sprintf(` +name: "%s" +help: "some help text" +type: COUNTER +metric: < + label: < + name: "address" + value: "0.0.0.0" + > + label: < + name: "port" + value: "5001" + > + counter: < + value: %d + > +> +`, name, value) + } + genTestHistProto := func(name string, hasClassic, hasExponential bool) string { + var classic string + if hasClassic { + classic = ` +bucket: < + cumulative_count: 0 + upper_bound: 0.005 +> +bucket: < + cumulative_count: 0 + upper_bound: 0.01 +> +bucket: < + cumulative_count: 0 + upper_bound: 0.025 +> +bucket: < + cumulative_count: 0 + upper_bound: 0.05 +> +bucket: < + cumulative_count: 0 + upper_bound: 0.1 +> +bucket: < + cumulative_count: 0 + upper_bound: 0.25 +> +bucket: < + cumulative_count: 0 + upper_bound: 0.5 +> +bucket: < + cumulative_count: 0 + upper_bound: 1 +> +bucket: < + cumulative_count: 0 + upper_bound: 2.5 +> +bucket: < + cumulative_count: 0 + upper_bound: 5 +> +bucket: < + cumulative_count: 1 + upper_bound: 10 +>` + } + var 
expo string + if hasExponential { + expo = ` +schema: 3 +zero_threshold: 2.938735877055719e-39 +zero_count: 0 +positive_span: < + offset: 2 + length: 1 +> +positive_delta: 1` + } + return fmt.Sprintf(` +name: "%s" +help: "This is a histogram with default buckets" +type: HISTOGRAM +metric: < + label: < + name: "address" + value: "0.0.0.0" + > + label: < + name: "port" + value: "5001" + > + histogram: < + sample_count: 1 + sample_sum: 10 + %s + %s + > + timestamp_ms: 1234568 +> +`, name, classic, expo) + } + + metricsTexts := map[string]struct { + text []string + contentType string + hasClassic bool + hasExponential bool + }{ + "text": { + text: []string{ + genTestCounterText("test_metric_1", 1, true), + genTestCounterText("test_metric_1_count", 1, true), + genTestCounterText("test_metric_1_sum", 1, true), + genTestCounterText("test_metric_1_bucket", 1, true), + genTestHistText("test_histogram_1", true), + genTestCounterText("test_metric_2", 1, true), + genTestCounterText("test_metric_2_count", 1, true), + genTestCounterText("test_metric_2_sum", 1, true), + genTestCounterText("test_metric_2_bucket", 1, true), + genTestHistText("test_histogram_2", true), + genTestCounterText("test_metric_3", 1, true), + genTestCounterText("test_metric_3_count", 1, true), + genTestCounterText("test_metric_3_sum", 1, true), + genTestCounterText("test_metric_3_bucket", 1, true), + genTestHistText("test_histogram_3", true), + }, + hasClassic: true, + }, + "text, in different order": { + text: []string{ + genTestCounterText("test_metric_1", 1, true), + genTestCounterText("test_metric_1_count", 1, true), + genTestCounterText("test_metric_1_sum", 1, true), + genTestCounterText("test_metric_1_bucket", 1, true), + genTestHistText("test_histogram_1", true), + genTestCounterText("test_metric_2", 1, true), + genTestCounterText("test_metric_2_count", 1, true), + genTestCounterText("test_metric_2_sum", 1, true), + genTestCounterText("test_metric_2_bucket", 1, true), + genTestHistText("test_histogram_2", true), + genTestHistText("test_histogram_3", true), + genTestCounterText("test_metric_3", 1, true), + genTestCounterText("test_metric_3_count", 1, true), + genTestCounterText("test_metric_3_sum", 1, true), + genTestCounterText("test_metric_3_bucket", 1, true), + }, + hasClassic: true, + }, + "protobuf": { + text: []string{ + genTestCounterProto("test_metric_1", 1), + genTestCounterProto("test_metric_1_count", 1), + genTestCounterProto("test_metric_1_sum", 1), + genTestCounterProto("test_metric_1_bucket", 1), + genTestHistProto("test_histogram_1", true, false), + genTestCounterProto("test_metric_2", 1), + genTestCounterProto("test_metric_2_count", 1), + genTestCounterProto("test_metric_2_sum", 1), + genTestCounterProto("test_metric_2_bucket", 1), + genTestHistProto("test_histogram_2", true, false), + genTestCounterProto("test_metric_3", 1), + genTestCounterProto("test_metric_3_count", 1), + genTestCounterProto("test_metric_3_sum", 1), + genTestCounterProto("test_metric_3_bucket", 1), + genTestHistProto("test_histogram_3", true, false), + }, + contentType: "application/vnd.google.protobuf", + hasClassic: true, + }, + "protobuf, in different order": { + text: []string{ + genTestHistProto("test_histogram_1", true, false), + genTestCounterProto("test_metric_1", 1), + genTestCounterProto("test_metric_1_count", 1), + genTestCounterProto("test_metric_1_sum", 1), + genTestCounterProto("test_metric_1_bucket", 1), + genTestHistProto("test_histogram_2", true, false), + genTestCounterProto("test_metric_2", 1), + 
genTestCounterProto("test_metric_2_count", 1), + genTestCounterProto("test_metric_2_sum", 1), + genTestCounterProto("test_metric_2_bucket", 1), + genTestHistProto("test_histogram_3", true, false), + genTestCounterProto("test_metric_3", 1), + genTestCounterProto("test_metric_3_count", 1), + genTestCounterProto("test_metric_3_sum", 1), + genTestCounterProto("test_metric_3_bucket", 1), + }, + contentType: "application/vnd.google.protobuf", + hasClassic: true, + }, + "protobuf, with additional native exponential histogram": { + text: []string{ + genTestCounterProto("test_metric_1", 1), + genTestCounterProto("test_metric_1_count", 1), + genTestCounterProto("test_metric_1_sum", 1), + genTestCounterProto("test_metric_1_bucket", 1), + genTestHistProto("test_histogram_1", true, true), + genTestCounterProto("test_metric_2", 1), + genTestCounterProto("test_metric_2_count", 1), + genTestCounterProto("test_metric_2_sum", 1), + genTestCounterProto("test_metric_2_bucket", 1), + genTestHistProto("test_histogram_2", true, true), + genTestCounterProto("test_metric_3", 1), + genTestCounterProto("test_metric_3_count", 1), + genTestCounterProto("test_metric_3_sum", 1), + genTestCounterProto("test_metric_3_bucket", 1), + genTestHistProto("test_histogram_3", true, true), + }, + contentType: "application/vnd.google.protobuf", + hasClassic: true, + hasExponential: true, + }, + "protobuf, with only native exponential histogram": { + text: []string{ + genTestCounterProto("test_metric_1", 1), + genTestCounterProto("test_metric_1_count", 1), + genTestCounterProto("test_metric_1_sum", 1), + genTestCounterProto("test_metric_1_bucket", 1), + genTestHistProto("test_histogram_1", false, true), + genTestCounterProto("test_metric_2", 1), + genTestCounterProto("test_metric_2_count", 1), + genTestCounterProto("test_metric_2_sum", 1), + genTestCounterProto("test_metric_2_bucket", 1), + genTestHistProto("test_histogram_2", false, true), + genTestCounterProto("test_metric_3", 1), + genTestCounterProto("test_metric_3_count", 1), + genTestCounterProto("test_metric_3_sum", 1), + genTestCounterProto("test_metric_3_bucket", 1), + genTestHistProto("test_histogram_3", false, true), + }, + contentType: "application/vnd.google.protobuf", + hasExponential: true, + }, + } + + checkBucketValues := func(expectedCount int, series storage.SeriesSet) { + labelName := "le" + var expectedValues []string + if expectedCount > 0 { + expectedValues = []string{"0.005", "0.01", "0.025", "0.05", "0.1", "0.25", "0.5", "1.0", "2.5", "5.0", "10.0", "+Inf"} + } + foundLeValues := map[string]bool{} + + for series.Next() { + s := series.At() + v := s.Labels().Get(labelName) + require.NotContains(t, foundLeValues, v, "duplicate label value found") + foundLeValues[v] = true + } + + require.Equal(t, len(expectedValues), len(foundLeValues), "unexpected number of label values, expected %v but found %v", expectedValues, foundLeValues) + for _, v := range expectedValues { + require.Contains(t, foundLeValues, v, "label value not found") + } + } + + // Checks that the expected series is present and runs a basic sanity check of the float values. 
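// Aside (illustrative): a tiny helper equivalent to the schema check performed further down.
// Classic histograms converted to NHCB carry the custom-buckets schema, while the exponential
// histograms in the protobuf fixtures above use schema 3; the histogram package import is assumed.
isNHCB := func(h *histogram.Histogram) bool {
	return h.Schema == histogram.CustomBucketsSchema
}
_ = isNHCB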
+ checkFloatSeries := func(series storage.SeriesSet, expectedCount int, expectedFloat float64) { + count := 0 + for series.Next() { + i := series.At().Iterator(nil) + loop: + for { + switch i.Next() { + case chunkenc.ValNone: + break loop + case chunkenc.ValFloat: + _, f := i.At() + require.Equal(t, expectedFloat, f) + case chunkenc.ValHistogram: + panic("unexpected value type: histogram") + case chunkenc.ValFloatHistogram: + panic("unexpected value type: float histogram") + default: + panic("unexpected value type") + } + } + count++ + } + require.Equal(t, expectedCount, count, "number of float series not as expected") + } + + // Checks that the expected series is present and runs a basic sanity check of the histogram values. + checkHistSeries := func(series storage.SeriesSet, expectedCount int, expectedSchema int32) { + count := 0 + for series.Next() { + i := series.At().Iterator(nil) + loop: + for { + switch i.Next() { + case chunkenc.ValNone: + break loop + case chunkenc.ValFloat: + panic("unexpected value type: float") + case chunkenc.ValHistogram: + _, h := i.AtHistogram(nil) + require.Equal(t, expectedSchema, h.Schema) + require.Equal(t, uint64(1), h.Count) + require.Equal(t, 10.0, h.Sum) + case chunkenc.ValFloatHistogram: + _, h := i.AtFloatHistogram(nil) + require.Equal(t, expectedSchema, h.Schema) + require.Equal(t, uint64(1), h.Count) + require.Equal(t, 10.0, h.Sum) + default: + panic("unexpected value type") + } + } + count++ + } + require.Equal(t, expectedCount, count, "number of histogram series not as expected") + } + + for metricsTextName, metricsText := range metricsTexts { + for name, tc := range map[string]struct { + alwaysScrapeClassicHistograms bool + convertClassicHistToNHCB bool + }{ + "convert with scrape": { + alwaysScrapeClassicHistograms: true, + convertClassicHistToNHCB: true, + }, + "convert without scrape": { + alwaysScrapeClassicHistograms: false, + convertClassicHistToNHCB: true, + }, + "scrape without convert": { + alwaysScrapeClassicHistograms: true, + convertClassicHistToNHCB: false, + }, + "neither scrape nor convert": { + alwaysScrapeClassicHistograms: false, + convertClassicHistToNHCB: false, + }, + } { + var expectedClassicHistCount, expectedNativeHistCount int + var expectCustomBuckets bool + if metricsText.hasExponential { + expectedNativeHistCount = 1 + expectCustomBuckets = false + expectedClassicHistCount = 0 + if metricsText.hasClassic && tc.alwaysScrapeClassicHistograms { + expectedClassicHistCount = 1 + } + } else if metricsText.hasClassic { + switch { + case tc.alwaysScrapeClassicHistograms && tc.convertClassicHistToNHCB: + expectedClassicHistCount = 1 + expectedNativeHistCount = 1 + expectCustomBuckets = true + case !tc.alwaysScrapeClassicHistograms && tc.convertClassicHistToNHCB: + expectedClassicHistCount = 0 + expectedNativeHistCount = 1 + expectCustomBuckets = true + case !tc.convertClassicHistToNHCB: + expectedClassicHistCount = 1 + expectedNativeHistCount = 0 + } + } + + t.Run(fmt.Sprintf("%s with %s", name, metricsTextName), func(t *testing.T) { + simpleStorage := teststorage.New(t) + defer simpleStorage.Close() + + config := &config.ScrapeConfig{ + JobName: "test", + SampleLimit: 100, + Scheme: "http", + ScrapeInterval: model.Duration(50 * time.Millisecond), + ScrapeTimeout: model.Duration(25 * time.Millisecond), + AlwaysScrapeClassicHistograms: tc.alwaysScrapeClassicHistograms, + ConvertClassicHistogramsToNHCB: tc.convertClassicHistToNHCB, + } + + scrapeCount := 0 + scraped := make(chan bool) + + ts := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if metricsText.contentType != "" { + w.Header().Set("Content-Type", `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`) + for _, text := range metricsText.text { + buf := &bytes.Buffer{} + // In case of protobuf, we have to create the binary representation. + pb := &dto.MetricFamily{} + // From text to proto message. + require.NoError(t, proto.UnmarshalText(text, pb)) + // From proto message to binary protobuf. + protoBuf, err := proto.Marshal(pb) + require.NoError(t, err) + + // Write first length, then binary protobuf. + varintBuf := binary.AppendUvarint(nil, uint64(len(protoBuf))) + buf.Write(varintBuf) + buf.Write(protoBuf) + w.Write(buf.Bytes()) + } + } else { + for _, text := range metricsText.text { + fmt.Fprint(w, text) + } + } + scrapeCount++ + if scrapeCount > 2 { + close(scraped) + } + })) + defer ts.Close() + + sp, err := newScrapePool(config, simpleStorage, 0, nil, nil, &Options{DiscoveryReloadInterval: model.Duration(10 * time.Millisecond), EnableNativeHistogramsIngestion: true}, newTestScrapeMetrics(t)) + require.NoError(t, err) + defer sp.stop() + + testURL, err := url.Parse(ts.URL) + require.NoError(t, err) + sp.Sync([]*targetgroup.Group{ + { + Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(testURL.Host)}}, + }, + }) + require.Len(t, sp.ActiveTargets(), 1) + + select { + case <-time.After(5 * time.Second): + t.Fatalf("target was not scraped") + case <-scraped: + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + q, err := simpleStorage.Querier(time.Time{}.UnixNano(), time.Now().UnixNano()) + require.NoError(t, err) + defer q.Close() + + var series storage.SeriesSet + + for i := 1; i <= 3; i++ { + series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_metric_%d", i))) + checkFloatSeries(series, 1, 1.) + + series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_metric_%d_count", i))) + checkFloatSeries(series, 1, 1.) + + series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_metric_%d_sum", i))) + checkFloatSeries(series, 1, 1.) + + series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_metric_%d_bucket", i))) + checkFloatSeries(series, 1, 1.) + + series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_histogram_%d_count", i))) + checkFloatSeries(series, expectedClassicHistCount, 1.) + + series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_histogram_%d_sum", i))) + checkFloatSeries(series, expectedClassicHistCount, 10.) 
+ + series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_histogram_%d_bucket", i))) + checkBucketValues(expectedClassicHistCount, series) + + series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_histogram_%d", i))) + + var expectedSchema int32 + if expectCustomBuckets { + expectedSchema = histogram.CustomBucketsSchema + } else { + expectedSchema = 3 + } + checkHistSeries(series, expectedNativeHistCount, expectedSchema) + } + }) + } + } +} + func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t *testing.T) { appender := &collectResultAppender{} var ( @@ -3433,7 +4212,8 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t * ) ctx, cancel := context.WithCancel(context.Background()) - sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) + // Since we're writing samples directly below we need to provide a protocol fallback. + sl := newBasicScrapeLoopWithFallback(t, ctx, scraper, app, 10*time.Millisecond, "text/plain") sl.trackTimestampsStaleness = true // Succeed once, several failures, then stop. numScrapes := 0 @@ -3461,7 +4241,6 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t * case <-time.After(5 * time.Second): t.Fatalf("Scrape wasn't stopped.") } - // 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for // each scrape successful or not. require.Len(t, appender.resultFloats, 27, "Appended samples not as expected:\n%s", appender) @@ -3679,7 +4458,7 @@ func TestScrapeLoopSeriesAddedDuplicates(t *testing.T) { ctx, sl := simpleTestScrapeLoop(t) slApp := sl.appender(ctx) - total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\ntest_metric 2\ntest_metric 3\n"), "", time.Time{}) + total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\ntest_metric 2\ntest_metric 3\n"), "text/plain", time.Time{}) require.NoError(t, err) require.NoError(t, slApp.Commit()) require.Equal(t, 3, total) @@ -3688,7 +4467,7 @@ func TestScrapeLoopSeriesAddedDuplicates(t *testing.T) { require.Equal(t, 2.0, prom_testutil.ToFloat64(sl.metrics.targetScrapeSampleDuplicate)) slApp = sl.appender(ctx) - total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1\ntest_metric 1\ntest_metric 1\n"), "", time.Time{}) + total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1\ntest_metric 1\ntest_metric 1\n"), "text/plain", time.Time{}) require.NoError(t, err) require.NoError(t, slApp.Commit()) require.Equal(t, 3, total) @@ -3698,7 +4477,7 @@ func TestScrapeLoopSeriesAddedDuplicates(t *testing.T) { // When different timestamps are supplied, multiple samples are accepted. 
slApp = sl.appender(ctx) - total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1 1001\ntest_metric 1 1002\ntest_metric 1 1003\n"), "", time.Time{}) + total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1 1001\ntest_metric 1 1002\ntest_metric 1 1003\n"), "text/plain", time.Time{}) require.NoError(t, err) require.NoError(t, slApp.Commit()) require.Equal(t, 3, total) @@ -3729,7 +4508,9 @@ func TestNativeHistogramMaxSchemaSet(t *testing.T) { }, } for name, tc := range testcases { + tc := tc t.Run(name, func(t *testing.T) { + t.Parallel() testNativeHistogramMaxSchemaSet(t, tc.minBucketFactor, tc.expectedSchema) }) } @@ -3771,8 +4552,9 @@ func testNativeHistogramMaxSchemaSet(t *testing.T, minBucketFactor string, expec // Create a scrape loop with the HTTP server as the target. configStr := fmt.Sprintf(` global: - scrape_interval: 1s - scrape_timeout: 1s + metric_name_validation_scheme: legacy + scrape_interval: 50ms + scrape_timeout: 25ms scrape_configs: - job_name: test %s @@ -3785,9 +4567,9 @@ scrape_configs: s.DB.EnableNativeHistograms() reg := prometheus.NewRegistry() - mng, err := NewManager(&Options{EnableNativeHistogramsIngestion: true}, nil, nil, s, reg) + mng, err := NewManager(&Options{DiscoveryReloadInterval: model.Duration(10 * time.Millisecond), EnableNativeHistogramsIngestion: true}, nil, nil, s, reg) require.NoError(t, err) - cfg, err := config.Load(configStr, false, log.NewNopLogger()) + cfg, err := config.Load(configStr, promslog.NewNopLogger()) require.NoError(t, err) mng.ApplyConfig(cfg) tsets := make(chan map[string][]*targetgroup.Group) @@ -3816,7 +4598,7 @@ scrape_configs: countSeries++ } return countSeries > 0 - }, 15*time.Second, 100*time.Millisecond) + }, 5*time.Second, 100*time.Millisecond) // Check that native histogram schema is as expected. q, err := s.Querier(0, math.MaxInt64) @@ -3841,3 +4623,174 @@ scrape_configs: require.Equal(t, expectedSchema, h.Schema) } } + +func TestTargetScrapeConfigWithLabels(t *testing.T) { + const ( + configTimeout = 1500 * time.Millisecond + expectedTimeout = "1.5" + expectedTimeoutLabel = "1s500ms" + secondTimeout = 500 * time.Millisecond + secondTimeoutLabel = "500ms" + expectedParam = "value1" + secondParam = "value2" + expectedPath = "/metric-ok" + secondPath = "/metric-nok" + httpScheme = "http" + paramLabel = "__param_param" + jobName = "test" + ) + + createTestServer := func(t *testing.T, done chan struct{}) *url.URL { + server := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer close(done) + require.Equal(t, expectedTimeout, r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds")) + require.Equal(t, expectedParam, r.URL.Query().Get("param")) + require.Equal(t, expectedPath, r.URL.Path) + + w.Header().Set("Content-Type", `text/plain; version=0.0.4`) + w.Write([]byte("metric_a 1\nmetric_b 2\n")) + }), + ) + t.Cleanup(server.Close) + serverURL, err := url.Parse(server.URL) + require.NoError(t, err) + return serverURL + } + + run := func(t *testing.T, cfg *config.ScrapeConfig, targets []*targetgroup.Group) chan struct{} { + done := make(chan struct{}) + srvURL := createTestServer(t, done) + + // Update target addresses to use the dynamically created server URL. 
+ for _, target := range targets { + for i := range target.Targets { + target.Targets[i][model.AddressLabel] = model.LabelValue(srvURL.Host) + } + } + + sp, err := newScrapePool(cfg, &nopAppendable{}, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) + require.NoError(t, err) + t.Cleanup(sp.stop) + + sp.Sync(targets) + return done + } + + cases := []struct { + name string + cfg *config.ScrapeConfig + targets []*targetgroup.Group + }{ + { + name: "Everything in scrape config", + cfg: &config.ScrapeConfig{ + ScrapeInterval: model.Duration(2 * time.Second), + ScrapeTimeout: model.Duration(configTimeout), + Params: url.Values{"param": []string{expectedParam}}, + JobName: jobName, + Scheme: httpScheme, + MetricsPath: expectedPath, + }, + targets: []*targetgroup.Group{ + { + Targets: []model.LabelSet{ + {model.AddressLabel: model.LabelValue("")}, + }, + }, + }, + }, + { + name: "Overridden in target", + cfg: &config.ScrapeConfig{ + ScrapeInterval: model.Duration(2 * time.Second), + ScrapeTimeout: model.Duration(secondTimeout), + JobName: jobName, + Scheme: httpScheme, + MetricsPath: secondPath, + Params: url.Values{"param": []string{secondParam}}, + }, + targets: []*targetgroup.Group{ + { + Targets: []model.LabelSet{ + { + model.AddressLabel: model.LabelValue(""), + model.ScrapeTimeoutLabel: expectedTimeoutLabel, + model.MetricsPathLabel: expectedPath, + paramLabel: expectedParam, + }, + }, + }, + }, + }, + { + name: "Overridden in relabel_config", + cfg: &config.ScrapeConfig{ + ScrapeInterval: model.Duration(2 * time.Second), + ScrapeTimeout: model.Duration(secondTimeout), + JobName: jobName, + Scheme: httpScheme, + MetricsPath: secondPath, + Params: url.Values{"param": []string{secondParam}}, + RelabelConfigs: []*relabel.Config{ + { + Action: relabel.DefaultRelabelConfig.Action, + Regex: relabel.DefaultRelabelConfig.Regex, + SourceLabels: relabel.DefaultRelabelConfig.SourceLabels, + TargetLabel: model.ScrapeTimeoutLabel, + Replacement: expectedTimeoutLabel, + }, + { + Action: relabel.DefaultRelabelConfig.Action, + Regex: relabel.DefaultRelabelConfig.Regex, + SourceLabels: relabel.DefaultRelabelConfig.SourceLabels, + TargetLabel: paramLabel, + Replacement: expectedParam, + }, + { + Action: relabel.DefaultRelabelConfig.Action, + Regex: relabel.DefaultRelabelConfig.Regex, + SourceLabels: relabel.DefaultRelabelConfig.SourceLabels, + TargetLabel: model.MetricsPathLabel, + Replacement: expectedPath, + }, + }, + }, + targets: []*targetgroup.Group{ + { + Targets: []model.LabelSet{ + { + model.AddressLabel: model.LabelValue(""), + model.ScrapeTimeoutLabel: secondTimeoutLabel, + model.MetricsPathLabel: secondPath, + paramLabel: secondParam, + }, + }, + }, + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + select { + case <-run(t, c.cfg, c.targets): + case <-time.After(10 * time.Second): + t.Fatal("timeout after 10 seconds") + } + }) + } +} + +func newScrapableServer(scrapeText string) (s *httptest.Server, scrapedTwice chan bool) { + var scrapes int + scrapedTwice = make(chan bool) + + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprint(w, scrapeText) + scrapes++ + if scrapes == 2 { + close(scrapedTwice) + } + })), scrapedTwice +} diff --git a/scrape/target.go b/scrape/target.go index 9ef4471fbd1..06d4737ff90 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -17,7 +17,6 @@ import ( "errors" "fmt" "hash/fnv" - "net" "net/url" "strings" "sync" @@ -424,7 +423,7 @@ func (app *maxSchemaAppender) AppendHistogram(ref 
storage.SeriesRef, lset labels // PopulateLabels builds a label set from the given label set and scrape configuration. // It returns a label set before relabeling was applied as the second return value. // Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling. -func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort bool) (res, orig labels.Labels, err error) { +func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig) (res, orig labels.Labels, err error) { // Copy labels into the labelset for the target if they are not set already. scrapeLabels := []labels.Label{ {Name: model.JobLabel, Value: cfg.JobName}, @@ -441,8 +440,8 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort } // Encode scrape query parameters as labels. for k, v := range cfg.Params { - if len(v) > 0 { - lb.Set(model.ParamLabelPrefix+k, v[0]) + if name := model.ParamLabelPrefix + k; len(v) > 0 && lb.Get(name) == "" { + lb.Set(name, v[0]) } } @@ -457,51 +456,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("no address") } - // addPort checks whether we should add a default port to the address. - // If the address is not valid, we don't append a port either. - addPort := func(s string) (string, string, bool) { - // If we can split, a port exists and we don't have to add one. - if host, port, err := net.SplitHostPort(s); err == nil { - return host, port, false - } - // If adding a port makes it valid, the previous error - // was not due to an invalid address and we can append a port. - _, _, err := net.SplitHostPort(s + ":1234") - return "", "", err == nil - } - addr := lb.Get(model.AddressLabel) - scheme := lb.Get(model.SchemeLabel) - host, port, add := addPort(addr) - // If it's an address with no trailing port, infer it based on the used scheme - // unless the no-default-scrape-port feature flag is present. - if !noDefaultPort && add { - // Addresses reaching this point are already wrapped in [] if necessary. - switch scheme { - case "http", "": - addr += ":80" - case "https": - addr += ":443" - default: - return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("invalid scheme: %q", cfg.Scheme) - } - lb.Set(model.AddressLabel, addr) - } - - if noDefaultPort { - // If it's an address with a trailing default port and the - // no-default-scrape-port flag is present, remove the port. - switch port { - case "80": - if scheme == "http" { - lb.Set(model.AddressLabel, host) - } - case "443": - if scheme == "https" { - lb.Set(model.AddressLabel, host) - } - } - } if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { return labels.EmptyLabels(), labels.EmptyLabels(), err @@ -557,7 +512,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort } // TargetsFromGroup builds targets based on the given TargetGroup and config. 
-func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefaultPort bool, targets []*Target, lb *labels.Builder) ([]*Target, []error) { +func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, targets []*Target, lb *labels.Builder) ([]*Target, []error) { targets = targets[:0] failures := []error{} @@ -573,7 +528,7 @@ func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefault } } - lset, origLabels, err := PopulateLabels(lb, cfg, noDefaultPort) + lset, origLabels, err := PopulateLabels(lb, cfg) if err != nil { failures = append(failures, fmt.Errorf("instance %d in group %s: %w", i, tg, err)) } diff --git a/scrape/target_test.go b/scrape/target_test.go index 84fe078b2b8..bd279528742 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -348,7 +348,7 @@ func TestTargetsFromGroup(t *testing.T) { ScrapeInterval: model.Duration(1 * time.Minute), } lb := labels.NewBuilder(labels.EmptyLabels()) - targets, failures := TargetsFromGroup(&targetgroup.Group{Targets: []model.LabelSet{{}, {model.AddressLabel: "localhost:9090"}}}, &cfg, false, nil, lb) + targets, failures := TargetsFromGroup(&targetgroup.Group{Targets: []model.LabelSet{{}, {model.AddressLabel: "localhost:9090"}}}, &cfg, nil, lb) require.Len(t, targets, 1) require.Len(t, failures, 1) require.EqualError(t, failures[0], expectedError) @@ -435,7 +435,7 @@ scrape_configs: lb := labels.NewBuilder(labels.EmptyLabels()) group := &targetgroup.Group{Targets: targets} for i := 0; i < b.N; i++ { - tgets, _ = TargetsFromGroup(group, config.ScrapeConfigs[0], false, tgets, lb) + tgets, _ = TargetsFromGroup(group, config.ScrapeConfigs[0], tgets, lb) if len(targets) != nTargets { b.Fatalf("Expected %d targets, got %d", nTargets, len(targets)) } diff --git a/scripts/compress_assets.sh b/scripts/compress_assets.sh index 6608677bbfe..19e1e224868 100755 --- a/scripts/compress_assets.sh +++ b/scripts/compress_assets.sh @@ -4,6 +4,12 @@ set -euo pipefail +export STATIC_DIR=static +PREBUILT_ASSETS_STATIC_DIR=${PREBUILT_ASSETS_STATIC_DIR:-} +if [ -n "$PREBUILT_ASSETS_STATIC_DIR" ]; then + STATIC_DIR=$(realpath $PREBUILT_ASSETS_STATIC_DIR) +fi + cd web/ui cp embed.go.tmpl embed.go @@ -11,6 +17,19 @@ GZIP_OPTS="-fk" # gzip option '-k' may not always exist in the latest gzip available on different distros. if ! gzip -k -h &>/dev/null; then GZIP_OPTS="-f"; fi +mkdir -p static find static -type f -name '*.gz' -delete -find static -type f -exec gzip $GZIP_OPTS '{}' \; -print0 | xargs -0 -I % echo %.gz | sort | xargs echo //go:embed >> embed.go + +# Compress files from the prebuilt static directory and replicate the structure in the current static directory +find "${STATIC_DIR}" -type f ! 
-name '*.gz' -exec bash -c ' + for file; do + dest="${file#${STATIC_DIR}}" + mkdir -p "static/$(dirname "$dest")" + gzip '"$GZIP_OPTS"' "$file" -c > "static/${dest}.gz" + done +' bash {} + + +# Append the paths of gzipped files to embed.go +find static -type f -name '*.gz' -print0 | sort -z | xargs -0 echo //go:embed >> embed.go + echo var EmbedFS embed.FS >> embed.go diff --git a/scripts/get_module_version.sh b/scripts/get_module_version.sh new file mode 100755 index 00000000000..e3855870444 --- /dev/null +++ b/scripts/get_module_version.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +# if no version string is passed as an argument, read VERSION file +if [ $# -eq 0 ]; then + VERSION="$(< VERSION)" +else + VERSION=$1 +fi + + +# Remove leading 'v' if present +VERSION="${VERSION#v}" + +# Extract MAJOR, MINOR, and REST +MAJOR="${VERSION%%.*}" +MINOR="${VERSION#*.}"; MINOR="${MINOR%%.*}" +REST="${VERSION#*.*.}" + +# Format and output based on MAJOR version +if [[ "$MAJOR" == "2" ]]; then + echo "0.$MINOR.$REST" +elif [[ "$MAJOR" == "3" ]]; then + printf "0.3%02d.$REST\n" "$MINOR" +fi diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index a15cfc97f02..1c099932ba2 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -24,7 +24,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Install Go uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: diff --git a/scripts/ui_release.sh b/scripts/ui_release.sh index ea4423d257e..c1b872fd375 100755 --- a/scripts/ui_release.sh +++ b/scripts/ui_release.sh @@ -30,8 +30,8 @@ function publish() { cmd+=" --dry-run" fi for workspace in ${workspaces}; do - # package "app" is private so we shouldn't try to publish it. - if [[ "${workspace}" != "react-app" ]]; then + # package "mantine-ui" is private so we shouldn't try to publish it. + if [[ "${workspace}" != "mantine-ui" ]]; then cd "${workspace}" eval "${cmd}" cd "${root_ui_folder}" diff --git a/storage/buffer.go b/storage/buffer.go index 651e5c83e8d..e847c10e61a 100644 --- a/storage/buffer.go +++ b/storage/buffer.go @@ -187,6 +187,10 @@ func (s fSample) Type() chunkenc.ValueType { return chunkenc.ValFloat } +func (s fSample) Copy() chunks.Sample { + return s +} + type hSample struct { t int64 h *histogram.Histogram @@ -212,6 +216,10 @@ func (s hSample) Type() chunkenc.ValueType { return chunkenc.ValHistogram } +func (s hSample) Copy() chunks.Sample { + return hSample{t: s.t, h: s.h.Copy()} +} + type fhSample struct { t int64 fh *histogram.FloatHistogram @@ -237,13 +245,17 @@ func (s fhSample) Type() chunkenc.ValueType { return chunkenc.ValFloatHistogram } +func (s fhSample) Copy() chunks.Sample { + return fhSample{t: s.t, fh: s.fh.Copy()} +} + type sampleRing struct { delta int64 // Lookback buffers. We use iBuf for mixed samples, but one of the three - // concrete ones for homogenous samples. (Only one of the four bufs is + // concrete ones for homogeneous samples. (Only one of the four bufs is // allowed to be populated!) This avoids the overhead of the interface - // wrapper for the happy (and by far most common) case of homogenous + // wrapper for the happy (and by far most common) case of homogeneous // samples. iBuf []chunks.Sample fBuf []fSample @@ -268,7 +280,7 @@ const ( fhBuf ) -// newSampleRing creates a new sampleRing. 
If you do not know the prefereed +// newSampleRing creates a new sampleRing. If you do not know the preferred // value type yet, use a size of 0 (in which case the provided typ doesn't // matter). On the first add, a buffer of size 16 will be allocated with the // preferred type being the type of the first added sample. @@ -535,55 +547,8 @@ func (r *sampleRing) addFH(s fhSample) { } } -// genericAdd is a generic implementation of adding a chunks.Sample -// implementation to a buffer of a sample ring. However, the Go compiler -// currently (go1.20) decides to not expand the code during compile time, but -// creates dynamic code to handle the different types. That has a significant -// overhead during runtime, noticeable in PromQL benchmarks. For example, the -// "RangeQuery/expr=rate(a_hundred[1d]),steps=.*" benchmarks show about 7% -// longer runtime, 9% higher allocation size, and 10% more allocations. -// Therefore, genericAdd has been manually implemented for all the types -// (addSample, addF, addH, addFH) below. -// -// func genericAdd[T chunks.Sample](s T, buf []T, r *sampleRing) []T { -// l := len(buf) -// // Grow the ring buffer if it fits no more elements. -// if l == 0 { -// buf = make([]T, 16) -// l = 16 -// } -// if l == r.l { -// newBuf := make([]T, 2*l) -// copy(newBuf[l+r.f:], buf[r.f:]) -// copy(newBuf, buf[:r.f]) -// -// buf = newBuf -// r.i = r.f -// r.f += l -// l = 2 * l -// } else { -// r.i++ -// if r.i >= l { -// r.i -= l -// } -// } -// -// buf[r.i] = s -// r.l++ -// -// // Free head of the buffer of samples that just fell out of the range. -// tmin := s.T() - r.delta -// for buf[r.f].T() < tmin { -// r.f++ -// if r.f >= l { -// r.f -= l -// } -// r.l-- -// } -// return buf -// } - -// addSample is a handcoded specialization of genericAdd (see above). +// addSample adds a sample to a buffer of chunks.Sample, i.e. the general case +// using an interface as the type. func addSample(s chunks.Sample, buf []chunks.Sample, r *sampleRing) []chunks.Sample { l := len(buf) // Grow the ring buffer if it fits no more elements. @@ -607,7 +572,7 @@ func addSample(s chunks.Sample, buf []chunks.Sample, r *sampleRing) []chunks.Sam } } - buf[r.i] = s + buf[r.i] = s.Copy() r.l++ // Free head of the buffer of samples that just fell out of the range. @@ -622,7 +587,7 @@ func addSample(s chunks.Sample, buf []chunks.Sample, r *sampleRing) []chunks.Sam return buf } -// addF is a handcoded specialization of genericAdd (see above). +// addF adds an fSample to a (specialized) fSample buffer. func addF(s fSample, buf []fSample, r *sampleRing) []fSample { l := len(buf) // Grow the ring buffer if it fits no more elements. @@ -661,7 +626,7 @@ func addF(s fSample, buf []fSample, r *sampleRing) []fSample { return buf } -// addH is a handcoded specialization of genericAdd (see above). +// addH adds an hSample to a (specialized) hSample buffer. func addH(s hSample, buf []hSample, r *sampleRing) []hSample { l := len(buf) // Grow the ring buffer if it fits no more elements. @@ -705,7 +670,7 @@ func addH(s hSample, buf []hSample, r *sampleRing) []hSample { return buf } -// addFH is a handcoded specialization of genericAdd (see above). +// addFH adds an fhSample to a (specialized) fhSample buffer. func addFH(s fhSample, buf []fhSample, r *sampleRing) []fhSample { l := len(buf) // Grow the ring buffer if it fits no more elements. 
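The storage/buffer.go hunks above give fSample/hSample/fhSample a Copy() method and make addSample store s.Copy() instead of the interface value it was handed. The motivation is that hSample and fhSample hold pointers, and the iterator feeding the ring may reuse the pointed-to histogram between calls, so buffering without a deep copy can leave every buffered entry aliasing the same object. A minimal, self-contained sketch of that hazard, using made-up stand-in types rather than the real chunks.Sample implementations:

```go
package main

import "fmt"

// hist and sample are simplified stand-ins for histogram.Histogram and hSample:
// the sample carries a pointer, just like hSample does in storage/buffer.go.
type hist struct{ count int }

type sample struct {
	t int64
	h *hist
}

// copySample mirrors the new Copy() methods: deep-copy pointer-backed fields.
func copySample(s sample) sample {
	c := *s.h
	return sample{t: s.t, h: &c}
}

func main() {
	shared := &hist{count: 1} // an iterator may reuse this object between Next() calls

	var aliased, copied []sample
	for i := 1; i <= 3; i++ {
		shared.count = i // the "iterator" overwrites the shared histogram in place
		s := sample{t: int64(i), h: shared}
		aliased = append(aliased, s)           // buffers the pointer: every entry aliases shared
		copied = append(copied, copySample(s)) // buffers a deep copy: entries stay distinct
	}

	fmt.Println("aliased counts:", aliased[0].h.count, aliased[1].h.count, aliased[2].h.count) // 3 3 3
	fmt.Println("copied counts: ", copied[0].h.count, copied[1].h.count, copied[2].h.count)    // 1 2 3
}
```

This is also why fSample.Copy() in the hunk above can simply return its receiver: a plain float sample has no pointer fields to share.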
diff --git a/storage/buffer_test.go b/storage/buffer_test.go index b5c6443ac59..6e8e83db8f0 100644 --- a/storage/buffer_test.go +++ b/storage/buffer_test.go @@ -314,6 +314,56 @@ func TestBufferedSeriesIteratorMixedHistograms(t *testing.T) { require.Equal(t, histograms[1].ToFloat(nil), fh) } +func TestBufferedSeriesIteratorMixedFloatsAndHistograms(t *testing.T) { + histograms := tsdbutil.GenerateTestHistograms(5) + + it := NewBufferIterator(NewListSeriesIteratorWithCopy(samples{ + hSample{t: 1, h: histograms[0].Copy()}, + fSample{t: 2, f: 2}, + hSample{t: 3, h: histograms[1].Copy()}, + hSample{t: 4, h: histograms[2].Copy()}, + fhSample{t: 3, fh: histograms[3].ToFloat(nil)}, + fhSample{t: 4, fh: histograms[4].ToFloat(nil)}, + }), 6) + + require.Equal(t, chunkenc.ValNone, it.Seek(7)) + require.NoError(t, it.Err()) + + buf := it.Buffer() + + require.Equal(t, chunkenc.ValHistogram, buf.Next()) + _, h0 := buf.AtHistogram() + require.Equal(t, histograms[0], h0) + + require.Equal(t, chunkenc.ValFloat, buf.Next()) + _, v := buf.At() + require.Equal(t, 2.0, v) + + require.Equal(t, chunkenc.ValHistogram, buf.Next()) + _, h1 := buf.AtHistogram() + require.Equal(t, histograms[1], h1) + + require.Equal(t, chunkenc.ValHistogram, buf.Next()) + _, h2 := buf.AtHistogram() + require.Equal(t, histograms[2], h2) + + require.Equal(t, chunkenc.ValFloatHistogram, buf.Next()) + _, h3 := buf.AtFloatHistogram(nil) + require.Equal(t, histograms[3].ToFloat(nil), h3) + + require.Equal(t, chunkenc.ValFloatHistogram, buf.Next()) + _, h4 := buf.AtFloatHistogram(nil) + require.Equal(t, histograms[4].ToFloat(nil), h4) + + // Test for overwrite bug where the buffered histogram was reused + // between items in the buffer. + require.Equal(t, histograms[0], h0) + require.Equal(t, histograms[1], h1) + require.Equal(t, histograms[2], h2) + require.Equal(t, histograms[3].ToFloat(nil), h3) + require.Equal(t, histograms[4].ToFloat(nil), h4) +} + func BenchmarkBufferedSeriesIterator(b *testing.B) { // Simulate a 5 minute rate. it := NewBufferIterator(newFakeSeriesIterator(int64(b.N), 30), 5*60) diff --git a/storage/fanout.go b/storage/fanout.go index e52342bc7ed..4d076788a7c 100644 --- a/storage/fanout.go +++ b/storage/fanout.go @@ -15,9 +15,8 @@ package storage import ( "context" + "log/slog" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" @@ -28,7 +27,7 @@ import ( ) type fanout struct { - logger log.Logger + logger *slog.Logger primary Storage secondaries []Storage @@ -43,7 +42,7 @@ type fanout struct { // and the error from the secondary querier will be returned as a warning. // // NOTE: In the case of Prometheus, it treats all remote storages as secondary / best effort. -func NewFanout(logger log.Logger, primary Storage, secondaries ...Storage) Storage { +func NewFanout(logger *slog.Logger, primary Storage, secondaries ...Storage) Storage { return &fanout{ logger: logger, primary: primary, @@ -142,12 +141,22 @@ func (f *fanout) Close() error { // fanoutAppender implements Appender. type fanoutAppender struct { - logger log.Logger + logger *slog.Logger primary Appender secondaries []Appender } +// SetOptions propagates the hints to both primary and secondary appenders. 
+func (f *fanoutAppender) SetOptions(opts *AppendOptions) { + if f.primary != nil { + f.primary.SetOptions(opts) + } + for _, appender := range f.secondaries { + appender.SetOptions(opts) + } +} + func (f *fanoutAppender) Append(ref SeriesRef, l labels.Labels, t int64, v float64) (SeriesRef, error) { ref, err := f.primary.Append(ref, l, t, v) if err != nil { @@ -190,6 +199,20 @@ func (f *fanoutAppender) AppendHistogram(ref SeriesRef, l labels.Labels, t int64 return ref, nil } +func (f *fanoutAppender) AppendHistogramCTZeroSample(ref SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) { + ref, err := f.primary.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh) + if err != nil { + return ref, err + } + + for _, appender := range f.secondaries { + if _, err := appender.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh); err != nil { + return 0, err + } + } + return ref, nil +} + func (f *fanoutAppender) UpdateMetadata(ref SeriesRef, l labels.Labels, m metadata.Metadata) (SeriesRef, error) { ref, err := f.primary.UpdateMetadata(ref, l, m) if err != nil { @@ -226,7 +249,7 @@ func (f *fanoutAppender) Commit() (err error) { err = appender.Commit() } else { if rollbackErr := appender.Rollback(); rollbackErr != nil { - level.Error(f.logger).Log("msg", "Squashed rollback error on commit", "err", rollbackErr) + f.logger.Error("Squashed rollback error on commit", "err", rollbackErr) } } } @@ -242,7 +265,7 @@ func (f *fanoutAppender) Rollback() (err error) { case err == nil: err = rollbackErr case rollbackErr != nil: - level.Error(f.logger).Log("msg", "Squashed rollback error on rollback", "err", rollbackErr) + f.logger.Error("Squashed rollback error on rollback", "err", rollbackErr) } } return nil diff --git a/storage/fanout_test.go b/storage/fanout_test.go index 4613fe75727..3eef9e3cd08 100644 --- a/storage/fanout_test.go +++ b/storage/fanout_test.go @@ -173,16 +173,13 @@ func TestFanoutErrors(t *testing.T) { } if tc.err != nil { - require.Error(t, ss.Err()) - require.Equal(t, tc.err.Error(), ss.Err().Error()) + require.EqualError(t, ss.Err(), tc.err.Error()) } if tc.warning != nil { - require.NotEmpty(t, ss.Warnings(), "warnings expected") w := ss.Warnings() - require.Error(t, w.AsErrors()[0]) - warn, _ := w.AsStrings("", 0, 0) - require.Equal(t, tc.warning.Error(), warn[0]) + require.NotEmpty(t, w, "warnings expected") + require.EqualError(t, w.AsErrors()[0], tc.warning.Error()) } }) t.Run("chunks", func(t *testing.T) { @@ -200,16 +197,13 @@ func TestFanoutErrors(t *testing.T) { } if tc.err != nil { - require.Error(t, ss.Err()) - require.Equal(t, tc.err.Error(), ss.Err().Error()) + require.EqualError(t, ss.Err(), tc.err.Error()) } if tc.warning != nil { - require.NotEmpty(t, ss.Warnings(), "warnings expected") w := ss.Warnings() - require.Error(t, w.AsErrors()[0]) - warn, _ := w.AsStrings("", 0, 0) - require.Equal(t, tc.warning.Error(), warn[0]) + require.NotEmpty(t, w, "warnings expected") + require.EqualError(t, w.AsErrors()[0], tc.warning.Error()) } }) } diff --git a/storage/interface.go b/storage/interface.go index 2f125e59028..56bb53dfe00 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -43,13 +43,15 @@ var ( ErrExemplarLabelLength = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength) ErrExemplarsDisabled = fmt.Errorf("exemplar storage is disabled or max exemplars is less than or equal to 0") ErrNativeHistogramsDisabled = fmt.Errorf("native histograms are 
disabled") + ErrOOONativeHistogramsDisabled = fmt.Errorf("out-of-order native histogram ingestion is disabled") // ErrOutOfOrderCT indicates failed append of CT to the storage // due to CT being older the then newer sample. // NOTE(bwplotka): This can be both an instrumentation failure or commonly expected // behaviour, and we currently don't have a way to determine this. As a result // it's recommended to ignore this error for now. - ErrOutOfOrderCT = fmt.Errorf("created timestamp out of order, ignoring") + ErrOutOfOrderCT = fmt.Errorf("created timestamp out of order, ignoring") + ErrCTNewerThanSample = fmt.Errorf("CT is newer or the same as sample's timestamp, ignoring") ) // SeriesRef is a generic series reference. In prometheus it is either a @@ -112,6 +114,8 @@ type Querier interface { LabelQuerier // Select returns a set of series that matches the given label matchers. + // Results are not checked whether they match. Results that do not match + // may cause undefined behavior. // Caller can specify if it requires returned series to be sorted. Prefer not requiring sorting for better performance. // It allows passing hints that can help in optimising select, but it's up to implementation how this is used if used at all. Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet @@ -150,6 +154,8 @@ type ChunkQuerier interface { LabelQuerier // Select returns a set of series that matches the given label matchers. + // Results are not checked whether they match. Results that do not match + // may cause undefined behavior. // Caller can specify if it requires returned series to be sorted. Prefer not requiring sorting for better performance. // It allows passing hints that can help in optimising select, but it's up to implementation how this is used if used at all. Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) ChunkSeriesSet @@ -157,7 +163,7 @@ type ChunkQuerier interface { // LabelQuerier provides querying access over labels. type LabelQuerier interface { - // LabelValues returns all potential values for a label name. + // LabelValues returns all potential values for a label name in sorted order. // It is not safe to use the strings beyond the lifetime of the querier. // If matchers are specified the returned result set is reduced // to label values of metrics matching the matchers. @@ -237,6 +243,10 @@ func (f QueryableFunc) Querier(mint, maxt int64) (Querier, error) { return f(mint, maxt) } +type AppendOptions struct { + DiscardOutOfOrder bool +} + // Appender provides batched appends against a storage. // It must be completed with a call to Commit or Rollback and must not be reused afterwards. // @@ -265,6 +275,10 @@ type Appender interface { // Appender has to be discarded after rollback. Rollback() error + // SetOptions configures the appender with specific append options such as + // discarding out-of-order samples even if out-of-order is enabled in the TSDB. + SetOptions(opts *AppendOptions) + ExemplarAppender HistogramAppender MetadataUpdater @@ -312,6 +326,20 @@ type HistogramAppender interface { // pointer. AppendHistogram won't mutate the histogram, but in turn // depends on the caller to not mutate it either. 
AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) + // AppendHistogramCTZeroSample adds synthetic zero sample for the given ct timestamp, + // which will be associated with given series, labels and the incoming + // sample's t (timestamp). AppendHistogramCTZeroSample returns error if zero sample can't be + // appended, for example when ct is too old, or when it would collide with + // incoming sample (sample has priority). + // + // AppendHistogramCTZeroSample has to be called before the corresponding histogram AppendHistogram. + // A series reference number is returned which can be used to modify the + // CT for the given series in the same or later transactions. + // Returned reference numbers are ephemeral and may be rejected in calls + // to AppendHistogramCTZeroSample() at any point. + // + // If the reference is 0 it must not be used for caching. + AppendHistogramCTZeroSample(ref SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) } // MetadataUpdater provides an interface for associating metadata to stored series. diff --git a/storage/merge.go b/storage/merge.go index 2424b26ab70..a4d0934b16c 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -153,13 +153,18 @@ func (q *mergeGenericQuerier) Select(ctx context.Context, sortSeries bool, hints ) // Schedule all Selects for all queriers we know about. for _, querier := range q.queriers { + // copy the matchers as some queriers may alter the slice. + // See https://github.com/prometheus/prometheus/issues/14723 + matchersCopy := make([]*labels.Matcher, len(matchers)) + copy(matchersCopy, matchers) + wg.Add(1) - go func(qr genericQuerier) { + go func(qr genericQuerier, m []*labels.Matcher) { defer wg.Done() // We need to sort for NewMergeSeriesSet to work. - seriesSetChan <- qr.Select(ctx, true, hints, matchers...) - }(querier) + seriesSetChan <- qr.Select(ctx, true, hints, m...) + }(querier, matchersCopy) } go func() { wg.Wait() diff --git a/storage/remote/azuread/azuread_test.go b/storage/remote/azuread/azuread_test.go index 7c971381206..08870382ec4 100644 --- a/storage/remote/azuread/azuread_test.go +++ b/storage/remote/azuread/azuread_test.go @@ -68,7 +68,7 @@ func (ad *AzureAdTestSuite) TestAzureAdRoundTripper() { cases := []struct { cfg *AzureADConfig }{ - // AzureAd roundtripper with Managedidentity. + // AzureAd roundtripper with ManagedIdentity. 
{ cfg: &AzureADConfig{ Cloud: "AzurePublic", diff --git a/storage/remote/chunked_test.go b/storage/remote/chunked_test.go index 7c3993ca62d..82ed8663451 100644 --- a/storage/remote/chunked_test.go +++ b/storage/remote/chunked_test.go @@ -86,7 +86,7 @@ func TestChunkedReader_Overflow(t *testing.T) { _, err = NewChunkedReader(bytes.NewReader(b2), 11, nil).Next() require.Error(t, err, "expect exceed limit error") - require.Equal(t, "chunkedReader: message size exceeded the limit 11 bytes; got: 12 bytes", err.Error()) + require.EqualError(t, err, "chunkedReader: message size exceeded the limit 11 bytes; got: 12 bytes") } func TestChunkedReader_CorruptedFrame(t *testing.T) { @@ -102,5 +102,5 @@ func TestChunkedReader_CorruptedFrame(t *testing.T) { _, err = NewChunkedReader(bytes.NewReader(bs), 20, nil).Next() require.Error(t, err, "expected malformed frame") - require.Equal(t, "chunkedReader: corrupted frame; checksum mismatch", err.Error()) + require.EqualError(t, err, "chunkedReader: corrupted frame; checksum mismatch") } diff --git a/storage/remote/client.go b/storage/remote/client.go index 62218cfba91..23775122e56 100644 --- a/storage/remote/client.go +++ b/storage/remote/client.go @@ -20,6 +20,7 @@ import ( "fmt" "io" "net/http" + "net/http/httptrace" "strconv" "strings" "time" @@ -31,6 +32,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/common/sigv4" "github.com/prometheus/common/version" + "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/trace" @@ -213,8 +215,11 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) { if conf.WriteProtoMsg != "" { writeProtoMsg = conf.WriteProtoMsg } - - httpClient.Transport = otelhttp.NewTransport(t) + httpClient.Transport = otelhttp.NewTransport( + t, + otelhttp.WithClientTrace(func(ctx context.Context) *httptrace.ClientTrace { + return otelhttptrace.NewClientTrace(ctx, otelhttptrace.WithoutSubSpans()) + })) return &Client{ remoteName: name, urlString: conf.URL.String(), diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go index 404f1add75a..c2fe6186ceb 100644 --- a/storage/remote/codec_test.go +++ b/storage/remote/codec_test.go @@ -20,9 +20,9 @@ import ( "sync" "testing" - "github.com/go-kit/log" "github.com/gogo/protobuf/proto" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/config" @@ -253,8 +253,7 @@ func TestValidateLabelsAndMetricName(t *testing.T) { t.Run(test.description, func(t *testing.T) { err := validateLabelsAndMetricName(test.input) if test.expectedErr != "" { - require.Error(t, err) - require.Equal(t, test.expectedErr, err.Error()) + require.EqualError(t, err, test.expectedErr) } else { require.NoError(t, err) } @@ -551,7 +550,7 @@ func TestNegotiateResponseType(t *testing.T) { _, err = NegotiateResponseType([]prompb.ReadRequest_ResponseType{20}) require.Error(t, err, "expected error due to not supported requested response types") - require.Equal(t, "server does not support any of the requested response types: [20]; supported: map[SAMPLES:{} STREAMED_XOR_CHUNKS:{}]", err.Error()) + require.EqualError(t, err, "server does not support any of the requested response types: [20]; supported: map[SAMPLES:{} STREAMED_XOR_CHUNKS:{}]") } func TestMergeLabels(t *testing.T) { @@ -583,7 +582,7 @@ func TestDecodeWriteRequest(t *testing.T) 
{ } func TestDecodeWriteV2Request(t *testing.T) { - buf, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy") + buf, _, _, err := buildV2WriteRequest(promslog.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy") require.NoError(t, err) actual, err := DecodeWriteV2Request(bytes.NewReader(buf)) diff --git a/storage/remote/metadata_watcher.go b/storage/remote/metadata_watcher.go index fdcd668f565..9306dcb4c28 100644 --- a/storage/remote/metadata_watcher.go +++ b/storage/remote/metadata_watcher.go @@ -16,11 +16,11 @@ package remote import ( "context" "errors" + "log/slog" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/scrape" ) @@ -44,7 +44,7 @@ func (noop *noopScrapeManager) Get() (*scrape.Manager, error) { // MetadataWatcher watches the Scrape Manager for a given WriteMetadataTo. type MetadataWatcher struct { name string - logger log.Logger + logger *slog.Logger managerGetter ReadyScrapeManager manager Watchable @@ -62,9 +62,9 @@ type MetadataWatcher struct { } // NewMetadataWatcher builds a new MetadataWatcher. -func NewMetadataWatcher(l log.Logger, mg ReadyScrapeManager, name string, w MetadataAppender, interval model.Duration, deadline time.Duration) *MetadataWatcher { +func NewMetadataWatcher(l *slog.Logger, mg ReadyScrapeManager, name string, w MetadataAppender, interval model.Duration, deadline time.Duration) *MetadataWatcher { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } if mg == nil { @@ -87,7 +87,7 @@ func NewMetadataWatcher(l log.Logger, mg ReadyScrapeManager, name string, w Meta // Start the MetadataWatcher. func (mw *MetadataWatcher) Start() { - level.Info(mw.logger).Log("msg", "Starting scraped metadata watcher") + mw.logger.Info("Starting scraped metadata watcher") mw.hardShutdownCtx, mw.hardShutdownCancel = context.WithCancel(context.Background()) mw.softShutdownCtx, mw.softShutdownCancel = context.WithCancel(mw.hardShutdownCtx) go mw.loop() @@ -95,15 +95,15 @@ func (mw *MetadataWatcher) Start() { // Stop the MetadataWatcher. func (mw *MetadataWatcher) Stop() { - level.Info(mw.logger).Log("msg", "Stopping metadata watcher...") - defer level.Info(mw.logger).Log("msg", "Scraped metadata watcher stopped") + mw.logger.Info("Stopping metadata watcher...") + defer mw.logger.Info("Scraped metadata watcher stopped") mw.softShutdownCancel() select { case <-mw.done: return case <-time.After(mw.deadline): - level.Error(mw.logger).Log("msg", "Failed to flush metadata") + mw.logger.Error("Failed to flush metadata") } mw.hardShutdownCancel() diff --git a/storage/remote/otlptranslator/prometheus/helpers_from_stdlib.go b/storage/remote/otlptranslator/prometheus/helpers_from_stdlib.go new file mode 100644 index 00000000000..cb9257d0737 --- /dev/null +++ b/storage/remote/otlptranslator/prometheus/helpers_from_stdlib.go @@ -0,0 +1,106 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Provenance-includes-location: https://github.com/golang/go/blob/f2d118fd5f7e872804a5825ce29797f81a28b0fa/src/strings/strings.go +// Provenance-includes-license: BSD-3-Clause +// Provenance-includes-copyright: Copyright The Go Authors. + +package prometheus + +import "strings" + +// fieldsFunc is a copy of strings.FieldsFunc from the Go standard library, +// but it also returns the separators as part of the result. +func fieldsFunc(s string, f func(rune) bool) ([]string, []string) { + // A span is used to record a slice of s of the form s[start:end]. + // The start index is inclusive and the end index is exclusive. + type span struct { + start int + end int + } + spans := make([]span, 0, 32) + separators := make([]string, 0, 32) + + // Find the field start and end indices. + // Doing this in a separate pass (rather than slicing the string s + // and collecting the result substrings right away) is significantly + // more efficient, possibly due to cache effects. + start := -1 // valid span start if >= 0 + for end, rune := range s { + if f(rune) { + if start >= 0 { + spans = append(spans, span{start, end}) + // Set start to a negative value. + // Note: using -1 here consistently and reproducibly + // slows down this code by a several percent on amd64. + start = ^start + separators = append(separators, string(s[end])) + } + } else { + if start < 0 { + start = end + } + } + } + + // Last field might end at EOF. + if start >= 0 { + spans = append(spans, span{start, len(s)}) + } + + // Create strings from recorded field indices. + a := make([]string, len(spans)) + for i, span := range spans { + a[i] = s[span.start:span.end] + } + + return a, separators +} + +// join is a copy of strings.Join from the Go standard library, +// but it also accepts a slice of separators to join the elements with. +// If the slice of separators is shorter than the slice of elements, use a default value. +// We also don't check for integer overflow. +func join(elems []string, separators []string, def string) string { + switch len(elems) { + case 0: + return "" + case 1: + return elems[0] + } + + var n int + var sep string + sepLen := len(separators) + for i, elem := range elems { + if i >= sepLen { + sep = def + } else { + sep = separators[i] + } + n += len(sep) + len(elem) + } + + var b strings.Builder + b.Grow(n) + b.WriteString(elems[0]) + for i, s := range elems[1:] { + if i >= sepLen { + sep = def + } else { + sep = separators[i] + } + b.WriteString(sep) + b.WriteString(s) + } + return b.String() +} diff --git a/storage/remote/otlptranslator/prometheus/normalize_label.go b/storage/remote/otlptranslator/prometheus/normalize_label.go index a112b9bbce2..b928e6888d1 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_label.go +++ b/storage/remote/otlptranslator/prometheus/normalize_label.go @@ -19,6 +19,8 @@ package prometheus import ( "strings" "unicode" + + "github.com/prometheus/prometheus/util/strutil" ) // Normalizes the specified label to follow Prometheus label names standard. 
@@ -26,16 +28,14 @@ import ( // See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels. // // Labels that start with non-letter rune will be prefixed with "key_". -// // An exception is made for double-underscores which are allowed. -func NormalizeLabel(label string) string { +func NormalizeLabel(label string, allowUTF8 bool) string { // Trivial case - if len(label) == 0 { + if len(label) == 0 || allowUTF8 { return label } - // Replace all non-alphanumeric runes with underscores - label = strings.Map(sanitizeRune, label) + label = strutil.SanitizeLabelName(label) // If label starts with a number, prepend with "key_" if unicode.IsDigit(rune(label[0])) { @@ -46,11 +46,3 @@ func NormalizeLabel(label string) string { return label } - -// Return '_' for anything non-alphanumeric. -func sanitizeRune(r rune) rune { - if unicode.IsLetter(r) || unicode.IsDigit(r) { - return r - } - return '_' -} diff --git a/storage/remote/otlptranslator/prometheus/normalize_label_test.go b/storage/remote/otlptranslator/prometheus/normalize_label_test.go new file mode 100644 index 00000000000..19ab6cd1730 --- /dev/null +++ b/storage/remote/otlptranslator/prometheus/normalize_label_test.go @@ -0,0 +1,48 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNormalizeLabel(t *testing.T) { + tests := []struct { + label string + expected string + expectedUTF8 string + }{ + {"", "", ""}, + {"label:with:colons", "label_with_colons", "label:with:colons"}, // Without UTF-8 support, colons are only allowed in metric names + {"LabelWithCapitalLetters", "LabelWithCapitalLetters", "LabelWithCapitalLetters"}, + {"label!with&special$chars)", "label_with_special_chars_", "label!with&special$chars)"}, + {"label_with_foreign_characters_字符", "label_with_foreign_characters___", "label_with_foreign_characters_字符"}, + {"label.with.dots", "label_with_dots", "label.with.dots"}, + {"123label", "key_123label", "123label"}, + {"_label_starting_with_underscore", "key_label_starting_with_underscore", "_label_starting_with_underscore"}, + {"__label_starting_with_2underscores", "__label_starting_with_2underscores", "__label_starting_with_2underscores"}, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("test_%d", i), func(t *testing.T) { + result := NormalizeLabel(test.label, false) + require.Equal(t, test.expected, result) + uTF8result := NormalizeLabel(test.label, true) + require.Equal(t, test.expectedUTF8, uTF8result) + }) + } +} diff --git a/storage/remote/otlptranslator/prometheus/normalize_name.go b/storage/remote/otlptranslator/prometheus/normalize_name.go index 0f472b80a09..335705aa8dd 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name.go @@ -17,9 +17,12 @@ package prometheus import ( + "regexp" + "slices" "strings" "unicode" + "github.com/prometheus/prometheus/util/strutil" "go.opentelemetry.io/collector/pdata/pmetric" ) @@ -84,38 +87,52 @@ var perUnitMap = map[string]string{ // // See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels, // https://prometheus.io/docs/practices/naming/#metric-and-label-naming -// and https://github.com/open-telemetry/opentelemetry-specification/blob/v1.33.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. -func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string { - var metricName string - +// and https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. +func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffixes, allowUTF8 bool) string { // Full normalization following standard Prometheus naming conventions if addMetricSuffixes { - return normalizeName(metric, namespace) + return normalizeName(metric, namespace, allowUTF8) } - // Simple case (no full normalization, no units, etc.), we simply trim out forbidden chars - metricName = RemovePromForbiddenRunes(metric.Name()) + var metricName string + if !allowUTF8 { + // Regexp for metric name characters that should be replaced with _. + invalidMetricCharRE := regexp.MustCompile(`[^a-zA-Z0-9:_]`) + + // Simple case (no full normalization, no units, etc.). + metricName = strings.Join(strings.FieldsFunc(metric.Name(), func(r rune) bool { + return invalidMetricCharRE.MatchString(string(r)) + }), "_") + } else { + metricName = metric.Name() + } // Namespace? if namespace != "" { return namespace + "_" + metricName } - // Metric name starts with a digit? 
Prefix it with an underscore - if metricName != "" && unicode.IsDigit(rune(metricName[0])) { + // Metric name starts with a digit and utf8 not allowed? Prefix it with an underscore. + if metricName != "" && unicode.IsDigit(rune(metricName[0])) && !allowUTF8 { metricName = "_" + metricName } return metricName } -// Build a normalized name for the specified metric -func normalizeName(metric pmetric.Metric, namespace string) string { - // Split metric name into "tokens" (remove all non-alphanumerics) - nameTokens := strings.FieldsFunc( - metric.Name(), - func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) }, - ) +// Build a normalized name for the specified metric. +func normalizeName(metric pmetric.Metric, namespace string, allowUTF8 bool) string { + var translationFunc func(rune) bool + if !allowUTF8 { + nonTokenMetricCharRE := regexp.MustCompile(`[^a-zA-Z0-9:]`) + translationFunc = func(r rune) bool { return nonTokenMetricCharRE.MatchString(string(r)) } + } else { + translationFunc = func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != ':' } + } + // Split metric name into "tokens" (of supported metric name runes). + // Note that this has the side effect of replacing multiple consecutive underscores with a single underscore. + // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. + nameTokens, separators := fieldsFunc(metric.Name(), translationFunc) // Split unit at the '/' if any unitTokens := strings.SplitN(metric.Unit(), "/", 2) @@ -123,11 +140,15 @@ func normalizeName(metric pmetric.Metric, namespace string) string { // Main unit // Append if not blank, doesn't contain '{}', and is not present in metric name already if len(unitTokens) > 0 { + var mainUnitProm, perUnitProm string mainUnitOTel := strings.TrimSpace(unitTokens[0]) if mainUnitOTel != "" && !strings.ContainsAny(mainUnitOTel, "{}") { - mainUnitProm := CleanUpString(unitMapGetOrDefault(mainUnitOTel)) - if mainUnitProm != "" && !contains(nameTokens, mainUnitProm) { - nameTokens = append(nameTokens, mainUnitProm) + mainUnitProm = unitMapGetOrDefault(mainUnitOTel) + if !allowUTF8 { + mainUnitProm = cleanUpUnit(mainUnitProm) + } + if slices.Contains(nameTokens, mainUnitProm) { + mainUnitProm = "" } } @@ -136,13 +157,29 @@ func normalizeName(metric pmetric.Metric, namespace string) string { if len(unitTokens) > 1 && unitTokens[1] != "" { perUnitOTel := strings.TrimSpace(unitTokens[1]) if perUnitOTel != "" && !strings.ContainsAny(perUnitOTel, "{}") { - perUnitProm := CleanUpString(perUnitMapGetOrDefault(perUnitOTel)) - if perUnitProm != "" && !contains(nameTokens, perUnitProm) { - nameTokens = append(nameTokens, "per", perUnitProm) + perUnitProm = perUnitMapGetOrDefault(perUnitOTel) + if !allowUTF8 { + perUnitProm = cleanUpUnit(perUnitProm) + } + } + if perUnitProm != "" { + perUnitProm = "per_" + perUnitProm + if slices.Contains(nameTokens, perUnitProm) { + perUnitProm = "" } } } + if perUnitProm != "" { + mainUnitProm = strings.TrimSuffix(mainUnitProm, "_") + } + + if mainUnitProm != "" { + nameTokens = append(nameTokens, mainUnitProm) + } + if perUnitProm != "" { + nameTokens = append(nameTokens, perUnitProm) + } } // Append _total for Counters @@ -164,8 +201,12 @@ func normalizeName(metric pmetric.Metric, namespace string) string { nameTokens = append([]string{namespace}, nameTokens...) 
} - // Build the string from the tokens, separated with underscores - normalizedName := strings.Join(nameTokens, "_") + // Build the string from the tokens + separators. + // If UTF-8 isn't allowed, we'll use underscores as separators. + if !allowUTF8 { + separators = []string{} + } + normalizedName := join(nameTokens, separators, "_") // Metric name cannot start with a digit, so prefix it with "_" in this case if normalizedName != "" && unicode.IsDigit(rune(normalizedName[0])) { @@ -235,13 +276,15 @@ func removeSuffix(tokens []string, suffix string) []string { return tokens } -// Clean up specified string so it's Prometheus compliant -func CleanUpString(s string) string { - return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) }), "_") -} - -func RemovePromForbiddenRunes(s string) string { - return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != '_' && r != ':' }), "_") +// cleanUpUnit cleans up unit so it matches model.LabelNameRE. +func cleanUpUnit(unit string) string { + // Multiple consecutive underscores are replaced with a single underscore. + // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. + multipleUnderscoresRE := regexp.MustCompile(`__+`) + return strings.TrimPrefix(multipleUnderscoresRE.ReplaceAllString( + strutil.SanitizeLabelName(unit), + "_", + ), "_") } // Retrieve the Prometheus "basic" unit corresponding to the specified "basic" unit @@ -262,16 +305,6 @@ func perUnitMapGetOrDefault(perUnit string) string { return perUnit } -// Returns whether the slice contains the specified value -func contains(slice []string, value string) bool { - for _, sliceEntry := range slice { - if sliceEntry == value { - return true - } - } - return false -} - // Remove the specified value from the slice func removeItem(slice []string, value string) []string { newSlice := make([]string, 0, len(slice)) diff --git a/storage/remote/otlptranslator/prometheus/normalize_name_test.go b/storage/remote/otlptranslator/prometheus/normalize_name_test.go index 07b9b0a784b..d97e7a560a8 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name_test.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name_test.go @@ -25,92 +25,119 @@ import ( ) func TestByte(t *testing.T) { - require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("system.filesystem.usage", "By"), "")) + require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("system.filesystem.usage", "By"), "", false)) } func TestByteCounter(t *testing.T) { - require.Equal(t, "system_io_bytes_total", normalizeName(createCounter("system.io", "By"), "")) - require.Equal(t, "network_transmitted_bytes_total", normalizeName(createCounter("network_transmitted_bytes_total", "By"), "")) + require.Equal(t, "system_io_bytes_total", normalizeName(createCounter("system.io", "By"), "", false)) + require.Equal(t, "network_transmitted_bytes_total", normalizeName(createCounter("network_transmitted_bytes_total", "By"), "", false)) } func TestWhiteSpaces(t *testing.T) { - require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("\t system.filesystem.usage ", " By\t"), "")) + require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("\t system.filesystem.usage ", " By\t"), "", false)) } 
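For orientation, the expectations in these tests encode a simple convention: break the OTel metric name on runes Prometheus does not allow, join the pieces with underscores, and append a Prometheus-style unit suffix when one can be derived from the UCUM unit. The toy approximation below covers only plain gauges (no counters, per-units, namespaces, or the UTF-8 mode) and uses invented names (toyNormalize, toySuffixes); it is a reading aid for the expected strings in these tests, not the real normalizeName implementation.

```go
package main

import (
	"fmt"
	"strings"
	"unicode"
)

// toySuffixes is a tiny excerpt of the unit mapping exercised by the tests
// ("By" -> bytes, "1" -> ratio for gauges); the real table is much larger.
var toySuffixes = map[string]string{"By": "bytes", "1": "ratio"}

// toyNormalize: split on unsupported runes, join with "_", then append the
// mapped unit suffix if it is not already one of the name tokens.
func toyNormalize(name, unit string) string {
	tokens := strings.FieldsFunc(name, func(r rune) bool {
		return !unicode.IsLetter(r) && !unicode.IsDigit(r)
	})
	if suffix, ok := toySuffixes[strings.TrimSpace(unit)]; ok {
		present := false
		for _, t := range tokens {
			if t == suffix {
				present = true
			}
		}
		if !present {
			tokens = append(tokens, suffix)
		}
	}
	return strings.Join(tokens, "_")
}

func main() {
	fmt.Println(toyNormalize("system.filesystem.usage", "By"))  // system_filesystem_usage_bytes
	fmt.Println(toyNormalize("hw.gpu.memory.utilization", "1")) // hw_gpu_memory_utilization_ratio
}
```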
func TestNonStandardUnit(t *testing.T) { - require.Equal(t, "system_network_dropped", normalizeName(createGauge("system.network.dropped", "{packets}"), "")) + require.Equal(t, "system_network_dropped", normalizeName(createGauge("system.network.dropped", "{packets}"), "", false)) } func TestNonStandardUnitCounter(t *testing.T) { - require.Equal(t, "system_network_dropped_total", normalizeName(createCounter("system.network.dropped", "{packets}"), "")) + require.Equal(t, "system_network_dropped_total", normalizeName(createCounter("system.network.dropped", "{packets}"), "", false)) } func TestBrokenUnit(t *testing.T) { - require.Equal(t, "system_network_dropped_packets", normalizeName(createGauge("system.network.dropped", "packets"), "")) - require.Equal(t, "system_network_packets_dropped", normalizeName(createGauge("system.network.packets.dropped", "packets"), "")) - require.Equal(t, "system_network_packets", normalizeName(createGauge("system.network.packets", "packets"), "")) + require.Equal(t, "system_network_dropped_packets", normalizeName(createGauge("system.network.dropped", "packets"), "", false)) + require.Equal(t, "system_network_packets_dropped", normalizeName(createGauge("system.network.packets.dropped", "packets"), "", false)) + require.Equal(t, "system_network_packets", normalizeName(createGauge("system.network.packets", "packets"), "", false)) } func TestBrokenUnitCounter(t *testing.T) { - require.Equal(t, "system_network_dropped_packets_total", normalizeName(createCounter("system.network.dropped", "packets"), "")) - require.Equal(t, "system_network_packets_dropped_total", normalizeName(createCounter("system.network.packets.dropped", "packets"), "")) - require.Equal(t, "system_network_packets_total", normalizeName(createCounter("system.network.packets", "packets"), "")) + require.Equal(t, "system_network_dropped_packets_total", normalizeName(createCounter("system.network.dropped", "packets"), "", false)) + require.Equal(t, "system_network_packets_dropped_total", normalizeName(createCounter("system.network.packets.dropped", "packets"), "", false)) + require.Equal(t, "system_network_packets_total", normalizeName(createCounter("system.network.packets", "packets"), "", false)) } func TestRatio(t *testing.T) { - require.Equal(t, "hw_gpu_memory_utilization_ratio", normalizeName(createGauge("hw.gpu.memory.utilization", "1"), "")) - require.Equal(t, "hw_fan_speed_ratio", normalizeName(createGauge("hw.fan.speed_ratio", "1"), "")) - require.Equal(t, "objects_total", normalizeName(createCounter("objects", "1"), "")) + require.Equal(t, "hw_gpu_memory_utilization_ratio", normalizeName(createGauge("hw.gpu.memory.utilization", "1"), "", false)) + require.Equal(t, "hw_fan_speed_ratio", normalizeName(createGauge("hw.fan.speed_ratio", "1"), "", false)) + require.Equal(t, "objects_total", normalizeName(createCounter("objects", "1"), "", false)) } func TestHertz(t *testing.T) { - require.Equal(t, "hw_cpu_speed_limit_hertz", normalizeName(createGauge("hw.cpu.speed_limit", "Hz"), "")) + require.Equal(t, "hw_cpu_speed_limit_hertz", normalizeName(createGauge("hw.cpu.speed_limit", "Hz"), "", false)) } func TestPer(t *testing.T) { - require.Equal(t, "broken_metric_speed_km_per_hour", normalizeName(createGauge("broken.metric.speed", "km/h"), "")) - require.Equal(t, "astro_light_speed_limit_meters_per_second", normalizeName(createGauge("astro.light.speed_limit", "m/s"), "")) + require.Equal(t, "broken_metric_speed_km_per_hour", normalizeName(createGauge("broken.metric.speed", "km/h"), "", false)) + 
require.Equal(t, "astro_light_speed_limit_meters_per_second", normalizeName(createGauge("astro.light.speed_limit", "m/s"), "", false)) } func TestPercent(t *testing.T) { - require.Equal(t, "broken_metric_success_ratio_percent", normalizeName(createGauge("broken.metric.success_ratio", "%"), "")) - require.Equal(t, "broken_metric_success_percent", normalizeName(createGauge("broken.metric.success_percent", "%"), "")) + require.Equal(t, "broken_metric_success_ratio_percent", normalizeName(createGauge("broken.metric.success_ratio", "%"), "", false)) + require.Equal(t, "broken_metric_success_percent", normalizeName(createGauge("broken.metric.success_percent", "%"), "", false)) } func TestEmpty(t *testing.T) { - require.Equal(t, "test_metric_no_unit", normalizeName(createGauge("test.metric.no_unit", ""), "")) - require.Equal(t, "test_metric_spaces", normalizeName(createGauge("test.metric.spaces", " \t "), "")) -} - -func TestUnsupportedRunes(t *testing.T) { - require.Equal(t, "unsupported_metric_temperature_F", normalizeName(createGauge("unsupported.metric.temperature", "°F"), "")) - require.Equal(t, "unsupported_metric_weird", normalizeName(createGauge("unsupported.metric.weird", "+=.:,!* & #"), "")) - require.Equal(t, "unsupported_metric_redundant_test_per_C", normalizeName(createGauge("unsupported.metric.redundant", "__test $/°C"), "")) + require.Equal(t, "test_metric_no_unit", normalizeName(createGauge("test.metric.no_unit", ""), "", false)) + require.Equal(t, "test_metric_spaces", normalizeName(createGauge("test.metric.spaces", " \t "), "", false)) +} + +func TestAllowUTF8(t *testing.T) { + t.Run("allow UTF8", func(t *testing.T) { + require.Equal(t, "unsupported.metric.temperature_°F", normalizeName(createGauge("unsupported.metric.temperature", "°F"), "", true)) + require.Equal(t, "unsupported.metric.weird_+=.:,!* & #", normalizeName(createGauge("unsupported.metric.weird", "+=.:,!* & #"), "", true)) + require.Equal(t, "unsupported.metric.redundant___test $_per_°C", normalizeName(createGauge("unsupported.metric.redundant", "__test $/°C"), "", true)) + require.Equal(t, "metric_with_字符_foreign_characters_ど", normalizeName(createGauge("metric_with_字符_foreign_characters", "ど"), "", true)) + }) + t.Run("disallow UTF8", func(t *testing.T) { + require.Equal(t, "unsupported_metric_temperature_F", normalizeName(createGauge("unsupported.metric.temperature", "°F"), "", false)) + require.Equal(t, "unsupported_metric_weird", normalizeName(createGauge("unsupported.metric.weird", "+=.:,!* & #"), "", false)) + require.Equal(t, "unsupported_metric_redundant_test_per_C", normalizeName(createGauge("unsupported.metric.redundant", "__test $/°C"), "", false)) + require.Equal(t, "metric_with_foreign_characters", normalizeName(createGauge("metric_with_字符_foreign_characters", "ど"), "", false)) + }) +} + +func TestAllowUTF8KnownBugs(t *testing.T) { + // Due to historical reasons, the translator code was copied from OpenTelemetry collector codebase. + // Over there, they tried to provide means to translate metric names following Prometheus conventions that are documented here: + // https://prometheus.io/docs/practices/naming/ + // + // Althogh not explicitly said, it was implied that words should be separated by a single underscore and the codebase was written + // with that in mind. + // + // Now that we're allowing OTel users to have their original names stored in prometheus without any transformation, we're facing problems + // where two (or more) UTF-8 characters are being used to separate words. 
+ // TODO(arthursens): Fix it! + + // We're asserting on 'NotEqual', which proves the bug. + require.NotEqual(t, "metric....split_=+by_//utf8characters", normalizeName(createGauge("metric....split_=+by_//utf8characters", ""), "", true)) + // Here we're asserting on 'Equal', showing the current behavior. + require.Equal(t, "metric.split_by_utf8characters", normalizeName(createGauge("metric....split_=+by_//utf8characters", ""), "", true)) } func TestOTelReceivers(t *testing.T) { - require.Equal(t, "active_directory_ds_replication_network_io_bytes_total", normalizeName(createCounter("active_directory.ds.replication.network.io", "By"), "")) - require.Equal(t, "active_directory_ds_replication_sync_object_pending_total", normalizeName(createCounter("active_directory.ds.replication.sync.object.pending", "{objects}"), "")) - require.Equal(t, "active_directory_ds_replication_object_rate_per_second", normalizeName(createGauge("active_directory.ds.replication.object.rate", "{objects}/s"), "")) - require.Equal(t, "active_directory_ds_name_cache_hit_rate_percent", normalizeName(createGauge("active_directory.ds.name_cache.hit_rate", "%"), "")) - require.Equal(t, "active_directory_ds_ldap_bind_last_successful_time_milliseconds", normalizeName(createGauge("active_directory.ds.ldap.bind.last_successful.time", "ms"), "")) - require.Equal(t, "apache_current_connections", normalizeName(createGauge("apache.current_connections", "connections"), "")) - require.Equal(t, "apache_workers_connections", normalizeName(createGauge("apache.workers", "connections"), "")) - require.Equal(t, "apache_requests_total", normalizeName(createCounter("apache.requests", "1"), "")) - require.Equal(t, "bigip_virtual_server_request_count_total", normalizeName(createCounter("bigip.virtual_server.request.count", "{requests}"), "")) - require.Equal(t, "system_cpu_utilization_ratio", normalizeName(createGauge("system.cpu.utilization", "1"), "")) - require.Equal(t, "system_disk_operation_time_seconds_total", normalizeName(createCounter("system.disk.operation_time", "s"), "")) - require.Equal(t, "system_cpu_load_average_15m_ratio", normalizeName(createGauge("system.cpu.load_average.15m", "1"), "")) - require.Equal(t, "memcached_operation_hit_ratio_percent", normalizeName(createGauge("memcached.operation_hit_ratio", "%"), "")) - require.Equal(t, "mongodbatlas_process_asserts_per_second", normalizeName(createGauge("mongodbatlas.process.asserts", "{assertions}/s"), "")) - require.Equal(t, "mongodbatlas_process_journaling_data_files_mebibytes", normalizeName(createGauge("mongodbatlas.process.journaling.data_files", "MiBy"), "")) - require.Equal(t, "mongodbatlas_process_network_io_bytes_per_second", normalizeName(createGauge("mongodbatlas.process.network.io", "By/s"), "")) - require.Equal(t, "mongodbatlas_process_oplog_rate_gibibytes_per_hour", normalizeName(createGauge("mongodbatlas.process.oplog.rate", "GiBy/h"), "")) - require.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", normalizeName(createGauge("mongodbatlas.process.db.query_targeting.scanned_per_returned", "{scanned}/{returned}"), "")) - require.Equal(t, "nginx_requests", normalizeName(createGauge("nginx.requests", "requests"), "")) - require.Equal(t, "nginx_connections_accepted", normalizeName(createGauge("nginx.connections_accepted", "connections"), "")) - require.Equal(t, "nsxt_node_memory_usage_kilobytes", normalizeName(createGauge("nsxt.node.memory.usage", "KBy"), "")) - require.Equal(t, "redis_latest_fork_microseconds", 
normalizeName(createGauge("redis.latest_fork", "us"), "")) + require.Equal(t, "active_directory_ds_replication_network_io_bytes_total", normalizeName(createCounter("active_directory.ds.replication.network.io", "By"), "", false)) + require.Equal(t, "active_directory_ds_replication_sync_object_pending_total", normalizeName(createCounter("active_directory.ds.replication.sync.object.pending", "{objects}"), "", false)) + require.Equal(t, "active_directory_ds_replication_object_rate_per_second", normalizeName(createGauge("active_directory.ds.replication.object.rate", "{objects}/s"), "", false)) + require.Equal(t, "active_directory_ds_name_cache_hit_rate_percent", normalizeName(createGauge("active_directory.ds.name_cache.hit_rate", "%"), "", false)) + require.Equal(t, "active_directory_ds_ldap_bind_last_successful_time_milliseconds", normalizeName(createGauge("active_directory.ds.ldap.bind.last_successful.time", "ms"), "", false)) + require.Equal(t, "apache_current_connections", normalizeName(createGauge("apache.current_connections", "connections"), "", false)) + require.Equal(t, "apache_workers_connections", normalizeName(createGauge("apache.workers", "connections"), "", false)) + require.Equal(t, "apache_requests_total", normalizeName(createCounter("apache.requests", "1"), "", false)) + require.Equal(t, "bigip_virtual_server_request_count_total", normalizeName(createCounter("bigip.virtual_server.request.count", "{requests}"), "", false)) + require.Equal(t, "system_cpu_utilization_ratio", normalizeName(createGauge("system.cpu.utilization", "1"), "", false)) + require.Equal(t, "system_disk_operation_time_seconds_total", normalizeName(createCounter("system.disk.operation_time", "s"), "", false)) + require.Equal(t, "system_cpu_load_average_15m_ratio", normalizeName(createGauge("system.cpu.load_average.15m", "1"), "", false)) + require.Equal(t, "memcached_operation_hit_ratio_percent", normalizeName(createGauge("memcached.operation_hit_ratio", "%"), "", false)) + require.Equal(t, "mongodbatlas_process_asserts_per_second", normalizeName(createGauge("mongodbatlas.process.asserts", "{assertions}/s"), "", false)) + require.Equal(t, "mongodbatlas_process_journaling_data_files_mebibytes", normalizeName(createGauge("mongodbatlas.process.journaling.data_files", "MiBy"), "", false)) + require.Equal(t, "mongodbatlas_process_network_io_bytes_per_second", normalizeName(createGauge("mongodbatlas.process.network.io", "By/s"), "", false)) + require.Equal(t, "mongodbatlas_process_oplog_rate_gibibytes_per_hour", normalizeName(createGauge("mongodbatlas.process.oplog.rate", "GiBy/h"), "", false)) + require.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", normalizeName(createGauge("mongodbatlas.process.db.query_targeting.scanned_per_returned", "{scanned}/{returned}"), "", false)) + require.Equal(t, "nginx_requests", normalizeName(createGauge("nginx.requests", "requests"), "", false)) + require.Equal(t, "nginx_connections_accepted", normalizeName(createGauge("nginx.connections_accepted", "connections"), "", false)) + require.Equal(t, "nsxt_node_memory_usage_kilobytes", normalizeName(createGauge("nsxt.node.memory.usage", "KBy"), "", false)) + require.Equal(t, "redis_latest_fork_microseconds", normalizeName(createGauge("redis.latest_fork", "us"), "", false)) } func TestTrimPromSuffixes(t *testing.T) { @@ -144,17 +171,17 @@ func TestTrimPromSuffixes(t *testing.T) { } func TestNamespace(t *testing.T) { - require.Equal(t, "space_test", normalizeName(createGauge("test", ""), "space")) - require.Equal(t, 
"space_test", normalizeName(createGauge("#test", ""), "space")) + require.Equal(t, "space_test", normalizeName(createGauge("test", ""), "space", false)) + require.Equal(t, "space_test", normalizeName(createGauge("#test", ""), "space", false)) } -func TestCleanUpString(t *testing.T) { - require.Equal(t, "", CleanUpString("")) - require.Equal(t, "a_b", CleanUpString("a b")) - require.Equal(t, "hello_world", CleanUpString("hello, world!")) - require.Equal(t, "hello_you_2", CleanUpString("hello you 2")) - require.Equal(t, "1000", CleanUpString("$1000")) - require.Equal(t, "", CleanUpString("*+$^=)")) +func TestCleanUpUnit(t *testing.T) { + require.Equal(t, "", cleanUpUnit("")) + require.Equal(t, "a_b", cleanUpUnit("a b")) + require.Equal(t, "hello_world", cleanUpUnit("hello, world")) + require.Equal(t, "hello_you_2", cleanUpUnit("hello you 2")) + require.Equal(t, "1000", cleanUpUnit("$1000")) + require.Equal(t, "", cleanUpUnit("*+$^=)")) } func TestUnitMapGetOrDefault(t *testing.T) { @@ -179,27 +206,29 @@ func TestRemoveItem(t *testing.T) { require.Equal(t, []string{"b", "c"}, removeItem([]string{"a", "b", "c"}, "a")) } -func TestBuildCompliantNameWithNormalize(t *testing.T) { - require.Equal(t, "system_io_bytes_total", BuildCompliantName(createCounter("system.io", "By"), "", true)) - require.Equal(t, "system_network_io_bytes_total", BuildCompliantName(createCounter("network.io", "By"), "system", true)) - require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", ""), "", true)) - require.Equal(t, "envoy_rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", true)) - require.Equal(t, "foo_bar", BuildCompliantName(createGauge(":foo::bar", ""), "", true)) - require.Equal(t, "foo_bar_total", BuildCompliantName(createCounter(":foo::bar", ""), "", true)) +func TestBuildCompliantNameWithSuffixes(t *testing.T) { + require.Equal(t, "system_io_bytes_total", BuildCompliantName(createCounter("system.io", "By"), "", true, false)) + require.Equal(t, "system_network_io_bytes_total", BuildCompliantName(createCounter("network.io", "By"), "system", true, false)) + require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", ""), "", true, false)) + require.Equal(t, "envoy_rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", true, false)) + require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", true, false)) + require.Equal(t, ":foo::bar_total", BuildCompliantName(createCounter(":foo::bar", ""), "", true, false)) // Gauges with unit 1 are considered ratios. - require.Equal(t, "foo_bar_ratio", BuildCompliantName(createGauge("foo.bar", "1"), "", true)) + require.Equal(t, "foo_bar_ratio", BuildCompliantName(createGauge("foo.bar", "1"), "", true, false)) // Slashes in units are converted. 
- require.Equal(t, "system_io_foo_per_bar_total", BuildCompliantName(createCounter("system.io", "foo/bar"), "", true)) + require.Equal(t, "system_io_foo_per_bar_total", BuildCompliantName(createCounter("system.io", "foo/bar"), "", true, false)) + require.Equal(t, "metric_with_foreign_characters_total", BuildCompliantName(createCounter("metric_with_字符_foreign_characters", ""), "", true, false)) } func TestBuildCompliantNameWithoutSuffixes(t *testing.T) { - require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "By"), "", false)) - require.Equal(t, "system_network_io", BuildCompliantName(createCounter("network.io", "By"), "system", false)) - require.Equal(t, "system_network_I_O", BuildCompliantName(createCounter("network (I/O)", "By"), "system", false)) - require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", "By"), "", false)) - require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", false)) - require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", false)) - require.Equal(t, ":foo::bar", BuildCompliantName(createCounter(":foo::bar", ""), "", false)) - require.Equal(t, "foo_bar", BuildCompliantName(createGauge("foo.bar", "1"), "", false)) - require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "foo/bar"), "", false)) + require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "By"), "", false, false)) + require.Equal(t, "system_network_io", BuildCompliantName(createCounter("network.io", "By"), "system", false, false)) + require.Equal(t, "system_network_I_O", BuildCompliantName(createCounter("network (I/O)", "By"), "system", false, false)) + require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", "By"), "", false, false)) + require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", false, false)) + require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", false, false)) + require.Equal(t, ":foo::bar", BuildCompliantName(createCounter(":foo::bar", ""), "", false, false)) + require.Equal(t, "foo_bar", BuildCompliantName(createGauge("foo.bar", "1"), "", false, false)) + require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "foo/bar"), "", false, false)) + require.Equal(t, "metric_with___foreign_characters", BuildCompliantName(createCounter("metric_with_字符_foreign_characters", ""), "", false, false)) } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go index fd7f58f0738..30cfa86436f 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -157,7 +157,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting // map ensures no duplicate label names. 
l := make(map[string]string, maxLabelCount) for _, label := range labels { - var finalKey = prometheustranslator.NormalizeLabel(label.Name) + var finalKey = prometheustranslator.NormalizeLabel(label.Name, settings.AllowUTF8) if existingValue, alreadyExists := l[finalKey]; alreadyExists { l[finalKey] = existingValue + ";" + label.Value } else { @@ -166,7 +166,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting } for _, lbl := range promotedAttrs { - normalized := prometheustranslator.NormalizeLabel(lbl.Name) + normalized := prometheustranslator.NormalizeLabel(lbl.Name, settings.AllowUTF8) if _, exists := l[normalized]; !exists { l[normalized] = lbl.Value } @@ -205,7 +205,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting } // internal labels should be maintained if !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") { - name = prometheustranslator.NormalizeLabel(name) + name = prometheustranslator.NormalizeLabel(name, settings.AllowUTF8) } l[name] = extras[i+1] } @@ -351,9 +351,17 @@ func getPromExemplars[T exemplarType](ctx context.Context, everyN *everyNTimes, exemplarRunes := 0 promExemplar := prompb.Exemplar{ - Value: exemplar.DoubleValue(), Timestamp: timestamp.FromTime(exemplar.Timestamp().AsTime()), } + switch exemplar.ValueType() { + case pmetric.ExemplarValueTypeInt: + promExemplar.Value = float64(exemplar.IntValue()) + case pmetric.ExemplarValueTypeDouble: + promExemplar.Value = exemplar.DoubleValue() + default: + return nil, fmt.Errorf("unsupported exemplar value type: %v", exemplar.ValueType()) + } + if traceID := exemplar.TraceID(); !traceID.IsEmpty() { val := hex.EncodeToString(traceID[:]) exemplarRunes += utf8.RuneCountInString(traceIDKey) + utf8.RuneCountInString(val) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go index a48a57b0625..b22282097d6 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go @@ -48,7 +48,6 @@ func TestCreateAttributes(t *testing.T) { resource.Attributes().PutStr(k, v) } attrs := pcommon.NewMap() - attrs.PutStr("__name__", "test_metric") attrs.PutStr("metric-attr", "metric value") testCases := []struct { @@ -162,7 +161,7 @@ func TestCreateAttributes(t *testing.T) { settings := Settings{ PromoteResourceAttributes: tc.promoteResourceAttributes, } - lbls := createAttributes(resource, attrs, settings, nil, false) + lbls := createAttributes(resource, attrs, settings, nil, false, model.MetricNameLabel, "test_metric") assert.ElementsMatch(t, lbls, tc.expectedLabels) }) @@ -406,3 +405,38 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) { }) } } + +func TestGetPromExemplars(t *testing.T) { + ctx := context.Background() + everyN := &everyNTimes{n: 1} + + t.Run("Exemplars with int value", func(t *testing.T) { + pt := pmetric.NewNumberDataPoint() + exemplar := pt.Exemplars().AppendEmpty() + exemplar.SetTimestamp(pcommon.Timestamp(time.Now().UnixNano())) + exemplar.SetIntValue(42) + exemplars, err := getPromExemplars(ctx, everyN, pt) + assert.NoError(t, err) + assert.Len(t, exemplars, 1) + assert.Equal(t, float64(42), exemplars[0].Value) + }) + + t.Run("Exemplars with double value", func(t *testing.T) { + pt := pmetric.NewNumberDataPoint() + exemplar := pt.Exemplars().AppendEmpty() + exemplar.SetTimestamp(pcommon.Timestamp(time.Now().UnixNano())) + 
exemplar.SetDoubleValue(69.420) + exemplars, err := getPromExemplars(ctx, everyN, pt) + assert.NoError(t, err) + assert.Len(t, exemplars, 1) + assert.Equal(t, 69.420, exemplars[0].Value) + }) + + t.Run("Exemplars with unsupported value type", func(t *testing.T) { + pt := pmetric.NewNumberDataPoint() + exemplar := pt.Exemplars().AppendEmpty() + exemplar.SetTimestamp(pcommon.Timestamp(time.Now().UnixNano())) + _, err := getPromExemplars(ctx, everyN, pt) + assert.Error(t, err) + }) +} diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go index e064ab28a28..dcd83b7f935 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go @@ -23,12 +23,13 @@ import ( "time" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/prompb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" + "github.com/prometheus/prometheus/prompb" + prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" ) @@ -171,7 +172,7 @@ func TestConvertBucketsLayout(t *testing.T) { }, // Downscale: // 4+2+0+2, 0+0+0+0, 0+0+0+0, 0+0+0+0, 1+0+0+0 = 8, 0, 0, 0, 1 - // Check from sclaing from previous: 6+2, 0+0, 0+0, 0+0, 1+0 = 8, 0, 0, 0, 1 + // Check from scaling from previous: 6+2, 0+0, 0+0, 0+0, 1+0 = 8, 0, 0, 0, 1 wantDeltas: []int64{8, -7}, }, }, @@ -222,7 +223,7 @@ func TestConvertBucketsLayout(t *testing.T) { }, // Downscale: // 4+2+0+2, 0+0+0+0, 0+0+0+0, 1+0+0+0 = 8, 0, 0, 1 - // Check from sclaing from previous: 6+2, 0+0, 0+0, 1+0 = 8, 0, 0, 1 + // Check from scaling from previous: 6+2, 0+0, 0+0, 1+0 = 8, 0, 0, 1 wantDeltas: []int64{8, -8, 0, 1}, }, }, @@ -761,7 +762,7 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) { Settings{ ExportCreatedMetric: true, }, - prometheustranslator.BuildCompliantName(metric, "", true), + prometheustranslator.BuildCompliantName(metric, "", true, true), ) require.NoError(t, err) require.Empty(t, annots) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index 0afd2ad57e4..4f8baf31004 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -38,6 +38,7 @@ type Settings struct { ExportCreatedMetric bool AddMetricSuffixes bool SendMetadata bool + AllowUTF8 bool PromoteResourceAttributes []string } @@ -84,7 +85,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric continue } - promName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes) + promName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes, settings.AllowUTF8) // handle individual metrics based on type //exhaustive:enforce diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go index e932269644c..b01d2cb1fe3 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go @@ -22,10 +22,11 
@@ import ( "time" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/prompb" "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/prometheus/prometheus/prompb" ) func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) { diff --git a/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go b/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go index ba487041930..b423d2cc6e4 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go @@ -43,7 +43,7 @@ func otelMetricTypeToPromMetricType(otelMetric pmetric.Metric) prompb.MetricMeta return prompb.MetricMetadata_UNKNOWN } -func OtelMetricsToMetadata(md pmetric.Metrics, addMetricSuffixes bool) []*prompb.MetricMetadata { +func OtelMetricsToMetadata(md pmetric.Metrics, addMetricSuffixes, allowUTF8 bool) []*prompb.MetricMetadata { resourceMetricsSlice := md.ResourceMetrics() metadataLength := 0 @@ -65,7 +65,7 @@ func OtelMetricsToMetadata(md pmetric.Metrics, addMetricSuffixes bool) []*prompb metric := scopeMetrics.Metrics().At(k) entry := prompb.MetricMetadata{ Type: otelMetricTypeToPromMetricType(metric), - MetricFamilyName: prometheustranslator.BuildCompliantName(metric, "", addMetricSuffixes), + MetricFamilyName: prometheustranslator.BuildCompliantName(metric, "", addMetricSuffixes, allowUTF8), Help: metric.Description(), } metadata = append(metadata, &entry) diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index b1c8997268b..9f27c333a6d 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -17,17 +17,17 @@ import ( "context" "errors" "fmt" + "log/slog" "math" "strconv" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" @@ -407,7 +407,7 @@ type QueueManager struct { reshardDisableStartTimestamp atomic.Int64 // Time that reshard was disabled. reshardDisableEndTimestamp atomic.Int64 // Time that reshard is disabled until. - logger log.Logger + logger *slog.Logger flushDeadline time.Duration cfg config.QueueConfig mcfg config.MetadataConfig @@ -454,7 +454,7 @@ func NewQueueManager( metrics *queueManagerMetrics, watcherMetrics *wlog.WatcherMetrics, readerMetrics *wlog.LiveReaderMetrics, - logger log.Logger, + logger *slog.Logger, dir string, samplesIn *ewmaRate, cfg config.QueueConfig, @@ -471,7 +471,7 @@ func NewQueueManager( protoMsg config.RemoteWriteProtoMsg, ) *QueueManager { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } // Copy externalLabels into a slice, which we need for processExternalLabels. @@ -480,7 +480,7 @@ func NewQueueManager( extLabelsSlice = append(extLabelsSlice, l) }) - logger = log.With(logger, remoteName, client.Name(), endpoint, client.Endpoint()) + logger = logger.With(remoteName, client.Name(), endpoint, client.Endpoint()) t := &QueueManager{ logger: logger, flushDeadline: flushDeadline, @@ -526,7 +526,7 @@ func NewQueueManager( // ships them alongside series. 
If both mechanisms are set, the new one // takes precedence by implicitly disabling the older one. if t.mcfg.Send && t.protoMsg != config.RemoteWriteProtoMsgV1 { - level.Warn(logger).Log("msg", "usage of 'metadata_config.send' is redundant when using remote write v2 (or higher) as metadata will always be gathered from the WAL and included for every series within each write request") + logger.Warn("usage of 'metadata_config.send' is redundant when using remote write v2 (or higher) as metadata will always be gathered from the WAL and included for every series within each write request") t.mcfg.Send = false } @@ -567,7 +567,7 @@ func (t *QueueManager) AppendWatcherMetadata(ctx context.Context, metadata []scr err := t.sendMetadataWithBackoff(ctx, mm[i*t.mcfg.MaxSamplesPerSend:last], pBuf) if err != nil { t.metrics.failedMetadataTotal.Add(float64(last - (i * t.mcfg.MaxSamplesPerSend))) - level.Error(t.logger).Log("msg", "non-recoverable error while sending metadata", "count", last-(i*t.mcfg.MaxSamplesPerSend), "err", err) + t.logger.Error("non-recoverable error while sending metadata", "count", last-(i*t.mcfg.MaxSamplesPerSend), "err", err) } } } @@ -706,7 +706,7 @@ outer: if !ok { t.dataDropped.incr(1) if _, ok := t.droppedSeries[s.Ref]; !ok { - level.Info(t.logger).Log("msg", "Dropped sample for series that was not explicitly dropped via relabelling", "ref", s.Ref) + t.logger.Info("Dropped sample for series that was not explicitly dropped via relabelling", "ref", s.Ref) t.metrics.droppedSamplesTotal.WithLabelValues(reasonUnintentionalDroppedSeries).Inc() } else { t.metrics.droppedSamplesTotal.WithLabelValues(reasonDroppedSeries).Inc() @@ -769,7 +769,7 @@ outer: // Track dropped exemplars in the same EWMA for sharding calc. t.dataDropped.incr(1) if _, ok := t.droppedSeries[e.Ref]; !ok { - level.Info(t.logger).Log("msg", "Dropped exemplar for series that was not explicitly dropped via relabelling", "ref", e.Ref) + t.logger.Info("Dropped exemplar for series that was not explicitly dropped via relabelling", "ref", e.Ref) t.metrics.droppedExemplarsTotal.WithLabelValues(reasonUnintentionalDroppedSeries).Inc() } else { t.metrics.droppedExemplarsTotal.WithLabelValues(reasonDroppedSeries).Inc() @@ -825,7 +825,7 @@ outer: if !ok { t.dataDropped.incr(1) if _, ok := t.droppedSeries[h.Ref]; !ok { - level.Info(t.logger).Log("msg", "Dropped histogram for series that was not explicitly dropped via relabelling", "ref", h.Ref) + t.logger.Info("Dropped histogram for series that was not explicitly dropped via relabelling", "ref", h.Ref) t.metrics.droppedHistogramsTotal.WithLabelValues(reasonUnintentionalDroppedSeries).Inc() } else { t.metrics.droppedHistogramsTotal.WithLabelValues(reasonDroppedSeries).Inc() @@ -880,7 +880,7 @@ outer: if !ok { t.dataDropped.incr(1) if _, ok := t.droppedSeries[h.Ref]; !ok { - level.Info(t.logger).Log("msg", "Dropped histogram for series that was not explicitly dropped via relabelling", "ref", h.Ref) + t.logger.Info("Dropped histogram for series that was not explicitly dropped via relabelling", "ref", h.Ref) t.metrics.droppedHistogramsTotal.WithLabelValues(reasonUnintentionalDroppedSeries).Inc() } else { t.metrics.droppedHistogramsTotal.WithLabelValues(reasonDroppedSeries).Inc() @@ -944,8 +944,8 @@ func (t *QueueManager) Start() { // Stop stops sending samples to the remote storage and waits for pending // sends to complete. 
func (t *QueueManager) Stop() { - level.Info(t.logger).Log("msg", "Stopping remote storage...") - defer level.Info(t.logger).Log("msg", "Remote storage stopped.") + t.logger.Info("Stopping remote storage...") + defer t.logger.Info("Remote storage stopped.") close(t.quit) t.wg.Wait() @@ -1093,10 +1093,10 @@ func (t *QueueManager) updateShardsLoop() { // to stay close to shardUpdateDuration. select { case t.reshardChan <- desiredShards: - level.Info(t.logger).Log("msg", "Remote storage resharding", "from", t.numShards, "to", desiredShards) + t.logger.Info("Remote storage resharding", "from", t.numShards, "to", desiredShards) t.numShards = desiredShards default: - level.Info(t.logger).Log("msg", "Currently resharding, skipping.") + t.logger.Info("Currently resharding, skipping.") } case <-t.quit: return @@ -1114,14 +1114,14 @@ func (t *QueueManager) shouldReshard(desiredShards int) bool { minSendTimestamp := time.Now().Add(-1 * shardUpdateDuration).Unix() lsts := t.lastSendTimestamp.Load() if lsts < minSendTimestamp { - level.Warn(t.logger).Log("msg", "Skipping resharding, last successful send was beyond threshold", "lastSendTimestamp", lsts, "minSendTimestamp", minSendTimestamp) + t.logger.Warn("Skipping resharding, last successful send was beyond threshold", "lastSendTimestamp", lsts, "minSendTimestamp", minSendTimestamp) return false } if disableTimestamp := t.reshardDisableEndTimestamp.Load(); time.Now().Unix() < disableTimestamp { disabledAt := time.Unix(t.reshardDisableStartTimestamp.Load(), 0) disabledFor := time.Until(time.Unix(disableTimestamp, 0)) - level.Warn(t.logger).Log("msg", "Skipping resharding, resharding is disabled while waiting for recoverable errors", "disabled_at", disabledAt, "disabled_for", disabledFor) + t.logger.Warn("Skipping resharding, resharding is disabled while waiting for recoverable errors", "disabled_at", disabledAt, "disabled_for", disabledFor) return false } return true @@ -1164,7 +1164,7 @@ func (t *QueueManager) calculateDesiredShards() int { desiredShards = timePerSample * (dataInRate*dataKeptRatio + backlogCatchup) ) t.metrics.desiredNumShards.Set(desiredShards) - level.Debug(t.logger).Log("msg", "QueueManager.calculateDesiredShards", + t.logger.Debug("QueueManager.calculateDesiredShards", "dataInRate", dataInRate, "dataOutRate", dataOutRate, "dataKeptRatio", dataKeptRatio, @@ -1182,7 +1182,7 @@ func (t *QueueManager) calculateDesiredShards() int { lowerBound = float64(t.numShards) * (1. - shardToleranceFraction) upperBound = float64(t.numShards) * (1. + shardToleranceFraction) ) - level.Debug(t.logger).Log("msg", "QueueManager.updateShardsLoop", + t.logger.Debug("QueueManager.updateShardsLoop", "lowerBound", lowerBound, "desiredShards", desiredShards, "upperBound", upperBound) desiredShards = math.Ceil(desiredShards) // Round up to be on the safe side. @@ -1193,7 +1193,7 @@ func (t *QueueManager) calculateDesiredShards() int { numShards := int(desiredShards) // Do not downshard if we are more than ten seconds back. if numShards < t.numShards && delay > 10.0 { - level.Debug(t.logger).Log("msg", "Not downsharding due to being too far behind") + t.logger.Debug("Not downsharding due to being too far behind") return t.numShards } @@ -1321,7 +1321,7 @@ func (s *shards) stop() { // Log error for any dropped samples, exemplars, or histograms. 
logDroppedError := func(t string, counter atomic.Uint32) {
if dropped := counter.Load(); dropped > 0 {
- level.Error(s.qm.logger).Log("msg", fmt.Sprintf("Failed to flush all %s on shutdown", t), "count", dropped)
+ s.qm.logger.Error(fmt.Sprintf("Failed to flush all %s on shutdown", t), "count", dropped)
}
}
logDroppedError("samples", s.samplesDroppedOnHardShutdown)
@@ -1564,7 +1564,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
if timer {
- level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples,
+ s.qm.logger.Debug("runShard timer ticked, sending buffered data", "samples", nPendingSamples,
"exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
}
_ = s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf, enc)
@@ -1691,9 +1691,9 @@ func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exempl
s.qm.metrics.failedExemplarsTotal.Add(float64(exemplarDiff))
}
if err != nil {
- level.Error(s.qm.logger).Log("msg", "non-recoverable error", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff, "err", err)
+ s.qm.logger.Error("non-recoverable error", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff, "err", err)
} else if sampleDiff+exemplarDiff+histogramDiff > 0 {
- level.Error(s.qm.logger).Log("msg", "we got 2xx status code from the Receiver yet statistics indicate some dat was not written; investigation needed", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff)
+ s.qm.logger.Error("we got 2xx status code from the Receiver yet statistics indicate some data was not written; investigation needed", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff)
}

// These counters are used to calculate the dynamic sharding, and as such
@@ -2018,16 +2018,16 @@ func (t *QueueManager) sendWriteRequestWithBackoff(ctx context.Context, attempt
switch {
case backoffErr.retryAfter > 0:
sleepDuration = backoffErr.retryAfter
- level.Info(t.logger).Log("msg", "Retrying after duration specified by Retry-After header", "duration", sleepDuration)
+ t.logger.Info("Retrying after duration specified by Retry-After header", "duration", sleepDuration)
case backoffErr.retryAfter < 0:
- level.Debug(t.logger).Log("msg", "retry-after cannot be in past, retrying using default backoff mechanism")
+ t.logger.Debug("retry-after cannot be in past, retrying using default backoff mechanism")
}

// We should never reshard for a recoverable error; increasing shards could
// make the problem worse, particularly if we're getting rate limited.
//
// reshardDisableTimestamp holds the unix timestamp until which resharding
- // is diableld. We'll update that timestamp if the period we were just told
+ // is disabled. We'll update that timestamp if the period we were just told
// to sleep for is newer than the existing disabled timestamp.
reshardWaitPeriod := time.Now().Add(time.Duration(sleepDuration) * 2) if oldTS, updated := setAtomicToNewer(&t.reshardDisableEndTimestamp, reshardWaitPeriod.Unix()); updated { @@ -2047,7 +2047,7 @@ func (t *QueueManager) sendWriteRequestWithBackoff(ctx context.Context, attempt // If we make it this far, we've encountered a recoverable error and will retry. onRetry() - level.Warn(t.logger).Log("msg", "Failed to send batch, retrying", "err", err) + t.logger.Warn("Failed to send batch, retrying", "err", err) backoff = sleepDuration * 2 @@ -2147,12 +2147,12 @@ func compressPayload(tmpbuf *[]byte, inp []byte, enc Compression) (compressed [] } } -func buildWriteRequest(logger log.Logger, timeSeries []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, buf *[]byte, filter func(prompb.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) { +func buildWriteRequest(logger *slog.Logger, timeSeries []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, buf *[]byte, filter func(prompb.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) { highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms := buildTimeSeries(timeSeries, filter) if droppedSamples > 0 || droppedExemplars > 0 || droppedHistograms > 0 { - level.Debug(logger).Log("msg", "dropped data due to their age", "droppedSamples", droppedSamples, "droppedExemplars", droppedExemplars, "droppedHistograms", droppedHistograms) + logger.Debug("dropped data due to their age", "droppedSamples", droppedSamples, "droppedExemplars", droppedExemplars, "droppedHistograms", droppedHistograms) } req := &prompb.WriteRequest{ @@ -2185,11 +2185,11 @@ func buildWriteRequest(logger log.Logger, timeSeries []prompb.TimeSeries, metada return compressed, highest, lowest, nil } -func buildV2WriteRequest(logger log.Logger, samples []writev2.TimeSeries, labels []string, pBuf, buf *[]byte, filter func(writev2.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) { +func buildV2WriteRequest(logger *slog.Logger, samples []writev2.TimeSeries, labels []string, pBuf, buf *[]byte, filter func(writev2.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) { highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms := buildV2TimeSeries(samples, filter) if droppedSamples > 0 || droppedExemplars > 0 || droppedHistograms > 0 { - level.Debug(logger).Log("msg", "dropped data due to their age", "droppedSamples", droppedSamples, "droppedExemplars", droppedExemplars, "droppedHistograms", droppedHistograms) + logger.Debug("dropped data due to their age", "droppedSamples", droppedSamples, "droppedExemplars", droppedExemplars, "droppedHistograms", droppedHistograms) } req := &writev2.Request{ diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index 032a1a92f7c..4b7c5a4e901 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -28,13 +28,13 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/google/go-cmp/cmp" "github.com/prometheus/client_golang/prometheus" client_testutil "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/atomic" @@ -351,7 +351,7 @@ func TestMetadataDelivery(t *testing.T) 
{ require.Equal(t, 0.0, client_testutil.ToFloat64(m.metrics.failedMetadataTotal)) require.Len(t, c.receivedMetadata, numMetadata) - // One more write than the rounded qoutient should be performed in order to get samples that didn't + // One more write than the rounded quotient should be performed in order to get samples that didn't // fit into MaxSamplesPerSend. require.Equal(t, numMetadata/config.DefaultMetadataConfig.MaxSamplesPerSend+1, c.writesReceived) // Make sure the last samples were sent. @@ -1326,21 +1326,25 @@ func BenchmarkSampleSend(b *testing.B) { cfg.MaxShards = 20 // todo: test with new proto type(s) - m := newTestQueueManager(b, cfg, mcfg, defaultFlushDeadline, c, config.RemoteWriteProtoMsgV1) - m.StoreSeries(series, 0) + for _, format := range []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2} { + b.Run(string(format), func(b *testing.B) { + m := newTestQueueManager(b, cfg, mcfg, defaultFlushDeadline, c, format) + m.StoreSeries(series, 0) - // These should be received by the client. - m.Start() - defer m.Stop() + // These should be received by the client. + m.Start() + defer m.Stop() - b.ResetTimer() - for i := 0; i < b.N; i++ { - m.Append(samples) - m.UpdateSeriesSegment(series, i+1) // simulate what wlog.Watcher.garbageCollectSeries does - m.SeriesReset(i + 1) + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Append(samples) + m.UpdateSeriesSegment(series, i+1) // simulate what wlog.Watcher.garbageCollectSeries does + m.SeriesReset(i + 1) + } + // Do not include shutdown + b.StopTimer() + }) } - // Do not include shutdown - b.StopTimer() } // Check how long it takes to add N series, including external labels processing. @@ -1414,8 +1418,7 @@ func BenchmarkStartup(b *testing.B) { } sort.Ints(segments) - logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) - logger = log.With(logger, "caller", log.DefaultCaller) + logger := promslog.New(&promslog.Config{}) cfg := testDefaultQueueConfig() mcfg := config.DefaultMetadataConfig @@ -1849,7 +1852,7 @@ func createDummyTimeSeries(instances int) []timeSeries { } func BenchmarkBuildWriteRequest(b *testing.B) { - noopLogger := log.NewNopLogger() + noopLogger := promslog.NewNopLogger() bench := func(b *testing.B, batch []timeSeries) { buff := make([]byte, 0) seriesBuff := make([]prompb.TimeSeries, len(batch)) @@ -1859,13 +1862,6 @@ func BenchmarkBuildWriteRequest(b *testing.B) { } pBuf := proto.NewBuffer(nil) - // Warmup buffers - for i := 0; i < 10; i++ { - populateTimeSeries(batch, seriesBuff, true, true) - buildWriteRequest(noopLogger, seriesBuff, nil, pBuf, &buff, nil, "snappy") - } - - b.ResetTimer() totalSize := 0 for i := 0; i < b.N; i++ { populateTimeSeries(batch, seriesBuff, true, true) @@ -1896,46 +1892,44 @@ func BenchmarkBuildWriteRequest(b *testing.B) { } func BenchmarkBuildV2WriteRequest(b *testing.B) { - noopLogger := log.NewNopLogger() - type testcase struct { - batch []timeSeries - } - testCases := []testcase{ - {createDummyTimeSeries(2)}, - {createDummyTimeSeries(10)}, - {createDummyTimeSeries(100)}, - } - for _, tc := range testCases { + noopLogger := promslog.NewNopLogger() + bench := func(b *testing.B, batch []timeSeries) { symbolTable := writev2.NewSymbolTable() buff := make([]byte, 0) - seriesBuff := make([]writev2.TimeSeries, len(tc.batch)) + seriesBuff := make([]writev2.TimeSeries, len(batch)) for i := range seriesBuff { seriesBuff[i].Samples = []writev2.Sample{{}} seriesBuff[i].Exemplars = []writev2.Exemplar{{}} } pBuf := []byte{} - // Warmup buffers - for i := 0; 
i < 10; i++ { - populateV2TimeSeries(&symbolTable, tc.batch, seriesBuff, true, true) - buildV2WriteRequest(noopLogger, seriesBuff, symbolTable.Symbols(), &pBuf, &buff, nil, "snappy") - } - - b.Run(fmt.Sprintf("%d-instances", len(tc.batch)), func(b *testing.B) { - totalSize := 0 - for j := 0; j < b.N; j++ { - populateV2TimeSeries(&symbolTable, tc.batch, seriesBuff, true, true) - b.ResetTimer() - req, _, _, err := buildV2WriteRequest(noopLogger, seriesBuff, symbolTable.Symbols(), &pBuf, &buff, nil, "snappy") - if err != nil { - b.Fatal(err) - } - symbolTable.Reset() - totalSize += len(req) - b.ReportMetric(float64(totalSize)/float64(b.N), "compressedSize/op") + totalSize := 0 + for i := 0; i < b.N; i++ { + populateV2TimeSeries(&symbolTable, batch, seriesBuff, true, true) + req, _, _, err := buildV2WriteRequest(noopLogger, seriesBuff, symbolTable.Symbols(), &pBuf, &buff, nil, "snappy") + if err != nil { + b.Fatal(err) } - }) + totalSize += len(req) + b.ReportMetric(float64(totalSize)/float64(b.N), "compressedSize/op") + } } + + twoBatch := createDummyTimeSeries(2) + tenBatch := createDummyTimeSeries(10) + hundredBatch := createDummyTimeSeries(100) + + b.Run("2 instances", func(b *testing.B) { + bench(b, twoBatch) + }) + + b.Run("10 instances", func(b *testing.B) { + bench(b, tenBatch) + }) + + b.Run("1k instances", func(b *testing.B) { + bench(b, hundredBatch) + }) } func TestDropOldTimeSeries(t *testing.T) { diff --git a/storage/remote/read_handler.go b/storage/remote/read_handler.go index ffc64c9c3fb..8f2945f9740 100644 --- a/storage/remote/read_handler.go +++ b/storage/remote/read_handler.go @@ -16,13 +16,12 @@ package remote import ( "context" "errors" + "log/slog" "net/http" "slices" "strings" "sync" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/config" @@ -34,7 +33,7 @@ import ( ) type readHandler struct { - logger log.Logger + logger *slog.Logger queryable storage.SampleAndChunkQueryable config func() config.Config remoteReadSampleLimit int @@ -46,7 +45,7 @@ type readHandler struct { // NewReadHandler creates a http.Handler that accepts remote read requests and // writes them to the provided queryable. 
-func NewReadHandler(logger log.Logger, r prometheus.Registerer, queryable storage.SampleAndChunkQueryable, config func() config.Config, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame int) http.Handler { +func NewReadHandler(logger *slog.Logger, r prometheus.Registerer, queryable storage.SampleAndChunkQueryable, config func() config.Config, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame int) http.Handler { h := &readHandler{ logger: logger, queryable: queryable, @@ -140,7 +139,7 @@ func (h *readHandler) remoteReadSamples( } defer func() { if err := querier.Close(); err != nil { - level.Warn(h.logger).Log("msg", "Error on querier close", "err", err.Error()) + h.logger.Warn("Error on querier close", "err", err.Error()) } }() @@ -163,7 +162,7 @@ func (h *readHandler) remoteReadSamples( return err } for _, w := range ws { - level.Warn(h.logger).Log("msg", "Warnings on remote read query", "err", w.Error()) + h.logger.Warn("Warnings on remote read query", "err", w.Error()) } for _, ts := range resp.Results[i].Timeseries { ts.Labels = MergeLabels(ts.Labels, sortedExternalLabels) @@ -208,7 +207,7 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re } defer func() { if err := querier.Close(); err != nil { - level.Warn(h.logger).Log("msg", "Error on chunk querier close", "err", err.Error()) + h.logger.Warn("Error on chunk querier close", "err", err.Error()) } }() @@ -239,7 +238,7 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re } for _, w := range ws { - level.Warn(h.logger).Log("msg", "Warnings on chunked remote read query", "warnings", w.Error()) + h.logger.Warn("Warnings on chunked remote read query", "warnings", w.Error()) } return nil }(); err != nil { diff --git a/storage/remote/read_handler_test.go b/storage/remote/read_handler_test.go index 4cd4647e72c..fd7f3ad48d5 100644 --- a/storage/remote/read_handler_test.go +++ b/storage/remote/read_handler_test.go @@ -334,7 +334,7 @@ func TestStreamReadEndpoint(t *testing.T) { Type: prompb.Chunk_XOR, MinTimeMs: 7200000, MaxTimeMs: 7200000, - Data: []byte("\000\001\200\364\356\006@\307p\000\000\000\000\000\000"), + Data: []byte("\000\001\200\364\356\006@\307p\000\000\000\000\000"), }, }, }, @@ -381,7 +381,7 @@ func TestStreamReadEndpoint(t *testing.T) { Type: prompb.Chunk_XOR, MinTimeMs: 14400000, MaxTimeMs: 14400000, - Data: []byte("\000\001\200\350\335\r@\327p\000\000\000\000\000\000"), + Data: []byte("\000\001\200\350\335\r@\327p\000\000\000\000\000"), }, }, }, diff --git a/storage/remote/read_test.go b/storage/remote/read_test.go index d63cefc3fe7..b78a8c6215f 100644 --- a/storage/remote/read_test.go +++ b/storage/remote/read_test.go @@ -475,7 +475,9 @@ func TestSampleAndChunkQueryableClient(t *testing.T) { ) q, err := c.Querier(tc.mint, tc.maxt) require.NoError(t, err) - defer require.NoError(t, q.Close()) + defer func() { + require.NoError(t, q.Close()) + }() ss := q.Select(context.Background(), true, nil, tc.matchers...) 
require.NoError(t, err) diff --git a/storage/remote/storage.go b/storage/remote/storage.go index 05634f1798f..14c3c87d936 100644 --- a/storage/remote/storage.go +++ b/storage/remote/storage.go @@ -18,12 +18,13 @@ import ( "crypto/md5" "encoding/hex" "fmt" + "log/slog" "sync" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/config" @@ -51,8 +52,9 @@ type startTimeCallback func() (int64, error) // Storage represents all the remote read and write endpoints. It implements // storage.Storage. type Storage struct { - logger *logging.Deduper - mtx sync.Mutex + deduper *logging.Deduper + logger *slog.Logger + mtx sync.Mutex rws *WriteStorage @@ -62,14 +64,16 @@ type Storage struct { } // NewStorage returns a remote.Storage. -func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWAL bool) *Storage { +func NewStorage(l *slog.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWAL bool) *Storage { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } - logger := logging.Dedupe(l, 1*time.Minute) + deduper := logging.Dedupe(l, 1*time.Minute) + logger := slog.New(deduper) s := &Storage{ logger: logger, + deduper: deduper, localStartTimeCallback: stCallback, } s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm, metadataInWAL) @@ -196,7 +200,7 @@ func (s *Storage) LowestSentTimestamp() int64 { // Close the background processing of the storage queues. func (s *Storage) Close() error { - s.logger.Stop() + s.deduper.Stop() s.mtx.Lock() defer s.mtx.Unlock() return s.rws.Close() diff --git a/storage/remote/write.go b/storage/remote/write.go index 3d2f1fdfcdb..639f3445209 100644 --- a/storage/remote/write.go +++ b/storage/remote/write.go @@ -17,13 +17,14 @@ import ( "context" "errors" "fmt" + "log/slog" "math" "sync" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" @@ -57,7 +58,7 @@ var ( // WriteStorage represents all the remote write storage. type WriteStorage struct { - logger log.Logger + logger *slog.Logger reg prometheus.Registerer mtx sync.Mutex @@ -78,9 +79,9 @@ type WriteStorage struct { } // NewWriteStorage creates and runs a WriteStorage. 
-func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWal bool) *WriteStorage { +func NewWriteStorage(logger *slog.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWal bool) *WriteStorage { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } rws := &WriteStorage{ queues: make(map[string]*QueueManager), @@ -277,6 +278,7 @@ func (rws *WriteStorage) Close() error { type timestampTracker struct { writeStorage *WriteStorage + appendOptions *storage.AppendOptions samples int64 exemplars int64 histograms int64 @@ -284,6 +286,10 @@ type timestampTracker struct { highestRecvTimestamp *maxTimestamp } +func (t *timestampTracker) SetOptions(opts *storage.AppendOptions) { + t.appendOptions = opts +} + // Append implements storage.Appender. func (t *timestampTracker) Append(_ storage.SeriesRef, _ labels.Labels, ts int64, _ float64) (storage.SeriesRef, error) { t.samples++ @@ -306,14 +312,29 @@ func (t *timestampTracker) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, return 0, nil } -func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) { - // TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write. - // UpadteMetadata is no-op for remote write (where timestampTracker is being used) for now. +func (t *timestampTracker) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, ct int64) (storage.SeriesRef, error) { + t.samples++ + if ct > t.highestTimestamp { + // Theoretically, we should never see a CT zero sample with a timestamp higher than the highest timestamp we've seen so far. + // However, we're not going to enforce that here, as it is not the responsibility of the tracker to enforce this. + t.highestTimestamp = ct + } return 0, nil } -func (t *timestampTracker) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64) (storage.SeriesRef, error) { - // AppendCTZeroSample is no-op for remote-write for now. +func (t *timestampTracker) AppendHistogramCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, ct int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { + t.histograms++ + if ct > t.highestTimestamp { + // Theoretically, we should never see a CT zero sample with a timestamp higher than the highest timestamp we've seen so far. + // However, we're not going to enforce that here, as it is not the responsibility of the tracker to enforce this. + t.highestTimestamp = ct + } + return 0, nil +} + +func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) { + // TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write. + // UpdateMetadata is no-op for remote write (where timestampTracker is being used) for now. 
return 0, nil } diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 736bc8eff3c..87102a374b3 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -18,12 +18,11 @@ import ( "errors" "fmt" "io" + "log/slog" "net/http" "strings" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/prometheus/client_golang/prometheus" @@ -42,7 +41,7 @@ import ( ) type writeHandler struct { - logger log.Logger + logger *slog.Logger appendable storage.Appendable samplesWithInvalidLabelsTotal prometheus.Counter @@ -58,7 +57,7 @@ const maxAheadTime = 10 * time.Minute // // NOTE(bwplotka): When accepting v2 proto and spec, partial writes are possible // as per https://prometheus.io/docs/specs/remote_write_spec_2_0/#partial-write. -func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg) http.Handler { +func NewWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg) http.Handler { protoMsgs := map[config.RemoteWriteProtoMsg]struct{}{} for _, acc := range acceptedProtoMsgs { protoMsgs[acc] = struct{}{} @@ -119,7 +118,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { msgType, err := h.parseProtoMsg(contentType) if err != nil { - level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err) + h.logger.Error("Error decoding remote write request", "err", err) http.Error(w, err.Error(), http.StatusUnsupportedMediaType) return } @@ -131,7 +130,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } return ret }()) - level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err) + h.logger.Error("Error decoding remote write request", "err", err) http.Error(w, err.Error(), http.StatusUnsupportedMediaType) } @@ -142,14 +141,14 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // We could give http.StatusUnsupportedMediaType, but let's assume snappy by default. } else if enc != string(SnappyBlockCompression) { err := fmt.Errorf("%v encoding (compression) is not accepted by this server; only %v is acceptable", enc, SnappyBlockCompression) - level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err) + h.logger.Error("Error decoding remote write request", "err", err) http.Error(w, err.Error(), http.StatusUnsupportedMediaType) } // Read the request body. body, err := io.ReadAll(r.Body) if err != nil { - level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err.Error()) + h.logger.Error("Error decoding remote write request", "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -157,7 +156,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { decompressed, err := snappy.Decode(nil, body) if err != nil { // TODO(bwplotka): Add more context to responded error? 
- level.Error(h.logger).Log("msg", "Error decompressing remote write request", "err", err.Error()) + h.logger.Error("Error decompressing remote write request", "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -169,7 +168,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { var req prompb.WriteRequest if err := proto.Unmarshal(decompressed, &req); err != nil { // TODO(bwplotka): Add more context to responded error? - level.Error(h.logger).Log("msg", "Error decoding v1 remote write request", "protobuf_message", msgType, "err", err.Error()) + h.logger.Error("Error decoding v1 remote write request", "protobuf_message", msgType, "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -180,7 +179,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusBadRequest) return default: - level.Error(h.logger).Log("msg", "Error while remote writing the v1 request", "err", err.Error()) + h.logger.Error("Error while remote writing the v1 request", "err", err.Error()) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -193,7 +192,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { var req writev2.Request if err := proto.Unmarshal(decompressed, &req); err != nil { // TODO(bwplotka): Add more context to responded error? - level.Error(h.logger).Log("msg", "Error decoding v2 remote write request", "protobuf_message", msgType, "err", err.Error()) + h.logger.Error("Error decoding v2 remote write request", "protobuf_message", msgType, "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -205,7 +204,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if err != nil { if errHTTPCode/5 == 100 { // 5xx - level.Error(h.logger).Log("msg", "Error while remote writing the v2 request", "err", err.Error()) + h.logger.Error("Error while remote writing the v2 request", "err", err.Error()) } http.Error(w, err.Error(), errHTTPCode) return @@ -241,11 +240,11 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err // TODO(bwplotka): Even as per 1.0 spec, this should be a 400 error, while other samples are // potentially written. Perhaps unify with fixed writeV2 implementation a bit. 
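As the decode path above shows, the v1 handler expects a snappy block-compressed protobuf body together with the remote-write content headers. A hedged client-side sketch of a matching request, based on the remote write 1.0 spec (newV1WriteRequest and its URL parameter are illustrative, not taken from this patch):

package remoteclient

import (
	"bytes"
	"net/http"

	"github.com/gogo/protobuf/proto"
	"github.com/golang/snappy"

	"github.com/prometheus/prometheus/prompb"
)

// newV1WriteRequest marshals a prompb.WriteRequest, compresses it with
// snappy's block format (the only encoding the handler accepts), and sets the
// headers the content negotiation above looks at.
func newV1WriteRequest(url string, wr *prompb.WriteRequest) (*http.Request, error) {
	raw, err := proto.Marshal(wr)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(snappy.Encode(nil, raw)))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/x-protobuf")
	req.Header.Set("Content-Encoding", "snappy")
	req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")
	return req, nil
}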
if !ls.Has(labels.MetricName) || !ls.IsValid(model.NameValidationScheme) { - level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", ls.String()) + h.logger.Warn("Invalid metric names or labels", "got", ls.String()) samplesWithInvalidLabels++ continue } else if duplicateLabel, hasDuplicate := ls.HasDuplicateLabelNames(); hasDuplicate { - level.Warn(h.logger).Log("msg", "Invalid labels for series.", "labels", ls.String(), "duplicated_label", duplicateLabel) + h.logger.Warn("Invalid labels for series.", "labels", ls.String(), "duplicated_label", duplicateLabel) samplesWithInvalidLabels++ continue } @@ -261,10 +260,10 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err switch { case errors.Is(err, storage.ErrOutOfOrderExemplar): outOfOrderExemplarErrs++ - level.Debug(h.logger).Log("msg", "Out of order exemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) + h.logger.Debug("Out of order exemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) default: // Since exemplar storage is still experimental, we don't fail the request on ingestion errors - level.Debug(h.logger).Log("msg", "Error while adding exemplar in AppendExemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err) + h.logger.Debug("Error while adding exemplar in AppendExemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err) } } } @@ -276,7 +275,7 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err } if outOfOrderExemplarErrs > 0 { - _ = level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs) + h.logger.Warn("Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs) } if samplesWithInvalidLabels > 0 { h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels)) @@ -293,7 +292,7 @@ func (h *writeHandler) appendV1Samples(app storage.Appender, ss []prompb.Sample, if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(err, storage.ErrOutOfBounds) || errors.Is(err, storage.ErrDuplicateSampleForTimestamp) { - level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp) + h.logger.Error("Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp) } return err } @@ -315,7 +314,7 @@ func (h *writeHandler) appendV1Histograms(app storage.Appender, hh []prompb.Hist if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(err, storage.ErrOutOfBounds) || errors.Is(err, storage.ErrDuplicateSampleForTimestamp) { - level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp) + h.logger.Error("Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp) } return err } @@ -345,7 +344,7 @@ func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ Wri // On 5xx, we always rollback, because we expect // sender to retry and TSDB is not idempotent. 
if rerr := app.Rollback(); rerr != nil { - level.Error(h.logger).Log("msg", "writev2 rollback failed on retry-able error", "err", rerr) + h.logger.Error("writev2 rollback failed on retry-able error", "err", rerr) } return WriteResponseStats{}, errHTTPCode, err } @@ -407,7 +406,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * errors.Is(err, storage.ErrDuplicateSampleForTimestamp) || errors.Is(err, storage.ErrTooOldSample) { // TODO(bwplotka): Not too spammy log? - level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", ls.String(), "timestamp", s.Timestamp) + h.logger.Error("Out of order sample from remote write", "err", err.Error(), "series", ls.String(), "timestamp", s.Timestamp) badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String())) continue } @@ -432,7 +431,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * errors.Is(err, storage.ErrOutOfBounds) || errors.Is(err, storage.ErrDuplicateSampleForTimestamp) { // TODO(bwplotka): Not too spammy log? - level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", ls.String(), "timestamp", hp.Timestamp) + h.logger.Error("Out of order histogram from remote write", "err", err.Error(), "series", ls.String(), "timestamp", hp.Timestamp) badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String())) continue } @@ -450,18 +449,18 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * // Handle append error. if errors.Is(err, storage.ErrOutOfOrderExemplar) { outOfOrderExemplarErrs++ // Maintain old metrics, but technically not needed, given we fail here. - level.Error(h.logger).Log("msg", "Out of order exemplar", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) + h.logger.Error("Out of order exemplar", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String())) continue } // TODO(bwplotka): Add strict mode which would trigger rollback of everything if needed. // For now we keep the previously released flow (just error not debug leve) of dropping them without rollback and 5xx. - level.Error(h.logger).Log("msg", "failed to ingest exemplar, emitting error log, but no error for PRW caller", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) + h.logger.Error("failed to ingest exemplar, emitting error log, but no error for PRW caller", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) } m := ts.ToMetadata(req.Symbols) if _, err = app.UpdateMetadata(ref, ls, m); err != nil { - level.Debug(h.logger).Log("msg", "error while updating metadata from remote write", "err", err) + h.logger.Debug("error while updating metadata from remote write", "err", err) // Metadata is attached to each series, so since Prometheus does not reject sample without metadata information, // we don't report remote write error either. We increment metric instead. 
samplesWithoutMetadata += rs.AllSamples() - allSamplesSoFar @@ -469,7 +468,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * } if outOfOrderExemplarErrs > 0 { - level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs) + h.logger.Warn("Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs) } h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels)) @@ -482,7 +481,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * // NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and // writes them to the provided appendable. -func NewOTLPWriteHandler(logger log.Logger, appendable storage.Appendable, configFunc func() config.Config) http.Handler { +func NewOTLPWriteHandler(logger *slog.Logger, appendable storage.Appendable, configFunc func() config.Config) http.Handler { rwHandler := &writeHandler{ logger: logger, appendable: appendable, @@ -496,7 +495,7 @@ func NewOTLPWriteHandler(logger log.Logger, appendable storage.Appendable, confi } type otlpWriteHandler struct { - logger log.Logger + logger *slog.Logger rwHandler *writeHandler configFunc func() config.Config } @@ -504,7 +503,7 @@ type otlpWriteHandler struct { func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { req, err := DecodeOTLPWriteRequest(r) if err != nil { - level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err.Error()) + h.logger.Error("Error decoding remote write request", "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -514,14 +513,15 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { converter := otlptranslator.NewPrometheusConverter() annots, err := converter.FromMetrics(r.Context(), req.Metrics(), otlptranslator.Settings{ AddMetricSuffixes: true, + AllowUTF8: otlpCfg.TranslationStrategy == config.NoUTF8EscapingWithSuffixes, PromoteResourceAttributes: otlpCfg.PromoteResourceAttributes, }) if err != nil { - level.Warn(h.logger).Log("msg", "Error translating OTLP metrics to Prometheus write request", "err", err) + h.logger.Warn("Error translating OTLP metrics to Prometheus write request", "err", err) } ws, _ := annots.AsStrings("", 0, 0) if len(ws) > 0 { - level.Warn(h.logger).Log("msg", "Warnings translating OTLP metrics to Prometheus write request", "warnings", ws) + h.logger.Warn("Warnings translating OTLP metrics to Prometheus write request", "warnings", ws) } err = h.rwHandler.write(r.Context(), &prompb.WriteRequest{ @@ -535,7 +535,7 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusBadRequest) return default: - level.Error(h.logger).Log("msg", "Error appending remote write", "err", err.Error()) + h.logger.Error("Error appending remote write", "err", err.Error()) http.Error(w, err.Error(), http.StatusInternalServerError) return } diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index 5c89a1ab953..580c7c143eb 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -27,11 +27,12 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/gogo/protobuf/proto" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/require" + "github.com/prometheus/common/promslog" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" 
"github.com/prometheus/prometheus/model/histogram" @@ -129,7 +130,7 @@ func TestRemoteWriteHandlerHeadersHandling_V1Message(t *testing.T) { } appendable := &mockAppendable{} - handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -144,7 +145,7 @@ func TestRemoteWriteHandlerHeadersHandling_V1Message(t *testing.T) { } func TestRemoteWriteHandlerHeadersHandling_V2Message(t *testing.T) { - payload, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy") + payload, _, _, err := buildV2WriteRequest(promslog.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy") require.NoError(t, err) for _, tc := range []struct { @@ -230,7 +231,7 @@ func TestRemoteWriteHandlerHeadersHandling_V2Message(t *testing.T) { } appendable := &mockAppendable{} - handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -255,7 +256,7 @@ func TestRemoteWriteHandler_V1Message(t *testing.T) { // in Prometheus, so keeping like this to not break existing 1.0 clients. appendable := &mockAppendable{} - handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -428,7 +429,7 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) { }, } { t.Run(tc.desc, func(t *testing.T) { - payload, _, _, err := buildV2WriteRequest(log.NewNopLogger(), tc.input, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy") + payload, _, _, err := buildV2WriteRequest(promslog.NewNopLogger(), tc.input, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy") require.NoError(t, err) req, err := http.NewRequest("", "", bytes.NewReader(payload)) @@ -445,7 +446,7 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) { appendExemplarErr: tc.appendExemplarErr, updateMetadataErr: tc.updateMetadataErr, } - handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -544,7 +545,7 @@ func TestOutOfOrderSample_V1Message(t *testing.T) { require.NoError(t, err) appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}} - handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -586,7 +587,7 @@ func TestOutOfOrderExemplar_V1Message(t *testing.T) { 
require.NoError(t, err) appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}} - handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -624,7 +625,7 @@ func TestOutOfOrderHistogram_V1Message(t *testing.T) { require.NoError(t, err) appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}} - handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -655,7 +656,7 @@ func BenchmarkRemoteWriteHandler(b *testing.B) { appendable := &mockAppendable{} // TODO: test with other proto format(s) - handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) recorder := httptest.NewRecorder() b.ResetTimer() @@ -672,7 +673,7 @@ func TestCommitErr_V1Message(t *testing.T) { require.NoError(t, err) appendable := &mockAppendable{commitErr: fmt.Errorf("commit error")} - handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -685,7 +686,7 @@ func TestCommitErr_V1Message(t *testing.T) { } func TestCommitErr_V2Message(t *testing.T) { - payload, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy") + payload, _, _, err := buildV2WriteRequest(promslog.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy") require.NoError(t, err) req, err := http.NewRequest("", "", bytes.NewReader(payload)) @@ -696,7 +697,7 @@ func TestCommitErr_V2Message(t *testing.T) { req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue) appendable := &mockAppendable{commitErr: fmt.Errorf("commit error")} - handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -723,7 +724,7 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) { require.NoError(b, db.Close()) }) // TODO: test with other proto format(s) - handler := NewWriteHandler(log.NewNopLogger(), nil, db.Head(), []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, db.Head(), []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) buf, _, _, err := buildWriteRequest(nil, genSeriesWithSample(1000, 200*time.Minute.Milliseconds()), nil, nil, nil, nil, "snappy") require.NoError(b, err) @@ 
-832,6 +833,10 @@ func (m *mockAppendable) Appender(_ context.Context) storage.Appender { return m } +func (m *mockAppendable) SetOptions(opts *storage.AppendOptions) { + panic("unimplemented") +} + func (m *mockAppendable) Append(_ storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { if m.appendSampleErr != nil { return 0, m.appendSampleErr @@ -915,6 +920,13 @@ func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t return 0, nil } +func (m *mockAppendable) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + // AppendCTZeroSample is no-op for remote-write for now. + // TODO(bwplotka/arthursens): Add support for PRW 2.0 for CT zero feature (but also we might + // replace this with in-metadata CT storage, see https://github.com/prometheus/prometheus/issues/14218). + return 0, nil +} + func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, l labels.Labels, mp metadata.Metadata) (storage.SeriesRef, error) { if m.updateMetadataErr != nil { return 0, m.updateMetadataErr diff --git a/storage/series.go b/storage/series.go index 70e3d0a1990..a3dbec7088c 100644 --- a/storage/series.go +++ b/storage/series.go @@ -171,6 +171,34 @@ func (it *listSeriesIterator) Seek(t int64) chunkenc.ValueType { func (it *listSeriesIterator) Err() error { return nil } +type listSeriesIteratorWithCopy struct { + *listSeriesIterator +} + +func NewListSeriesIteratorWithCopy(samples Samples) chunkenc.Iterator { + return &listSeriesIteratorWithCopy{ + listSeriesIterator: &listSeriesIterator{samples: samples, idx: -1}, + } +} + +func (it *listSeriesIteratorWithCopy) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) { + t, ih := it.listSeriesIterator.AtHistogram(nil) + if h == nil || ih == nil { + return t, ih + } + ih.CopyTo(h) + return t, h +} + +func (it *listSeriesIteratorWithCopy) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { + t, ih := it.listSeriesIterator.AtFloatHistogram(nil) + if fh == nil || ih == nil { + return t, ih + } + ih.CopyTo(fh) + return t, fh +} + type listChunkSeriesIterator struct { chks []chunks.Meta idx int diff --git a/tracing/tracing.go b/tracing/tracing.go index 6b9319ecbd6..4fdedf505bd 100644 --- a/tracing/tracing.go +++ b/tracing/tracing.go @@ -16,11 +16,10 @@ package tracing import ( "context" "fmt" + "log/slog" "reflect" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/version" "go.opentelemetry.io/otel" @@ -43,14 +42,14 @@ const serviceName = "prometheus" // Manager is capable of building, (re)installing and shutting down // the tracer provider. type Manager struct { - logger log.Logger + logger *slog.Logger done chan struct{} config config.TracingConfig shutdownFunc func() error } // NewManager creates a new tracing manager. 
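Backing up to the listSeriesIteratorWithCopy added in storage/series.go above: it exists so callers can keep histogram values without aliasing the iterator's backing samples. A hedged consumer sketch written against that guarantee (collectHistograms is illustrative, not from the patch):

package serieswalk

import (
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// collectHistograms drains an iterator into fully owned values. With the
// iterator returned by storage.NewListSeriesIteratorWithCopy, AtHistogram
// fills the supplied buffer via CopyTo rather than returning internal
// pointers, so the copies appended here never share memory with the source.
func collectHistograms(it chunkenc.Iterator) ([]*histogram.Histogram, error) {
	var out []*histogram.Histogram
	buf := &histogram.Histogram{}
	for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() {
		if vt != chunkenc.ValHistogram {
			continue
		}
		_, buf = it.AtHistogram(buf)
		out = append(out, buf.Copy())
	}
	return out, it.Err()
}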
-func NewManager(logger log.Logger) *Manager { +func NewManager(logger *slog.Logger) *Manager { return &Manager{ logger: logger, done: make(chan struct{}), @@ -62,7 +61,7 @@ func NewManager(logger log.Logger) *Manager { func (m *Manager) Run() { otel.SetTextMapPropagator(propagation.TraceContext{}) otel.SetErrorHandler(otelErrHandler(func(err error) { - level.Error(m.logger).Log("msg", "OpenTelemetry handler returned an error", "err", err) + m.logger.Error("OpenTelemetry handler returned an error", "err", err.Error()) })) <-m.done } @@ -89,7 +88,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { m.config = cfg.TracingConfig m.shutdownFunc = nil otel.SetTracerProvider(noop.NewTracerProvider()) - level.Info(m.logger).Log("msg", "Tracing provider uninstalled.") + m.logger.Info("Tracing provider uninstalled.") return nil } @@ -102,7 +101,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { m.config = cfg.TracingConfig otel.SetTracerProvider(tp) - level.Info(m.logger).Log("msg", "Successfully installed a new tracer provider.") + m.logger.Info("Successfully installed a new tracer provider.") return nil } @@ -115,10 +114,10 @@ func (m *Manager) Stop() { } if err := m.shutdownFunc(); err != nil { - level.Error(m.logger).Log("msg", "failed to shut down the tracer provider", "err", err) + m.logger.Error("failed to shut down the tracer provider", "err", err) } - level.Info(m.logger).Log("msg", "Tracing manager stopped") + m.logger.Info("Tracing manager stopped") } type otelErrHandler func(err error) diff --git a/tracing/tracing_test.go b/tracing/tracing_test.go index b7996c61049..e735e1a18a0 100644 --- a/tracing/tracing_test.go +++ b/tracing/tracing_test.go @@ -16,8 +16,8 @@ package tracing import ( "testing" - "github.com/go-kit/log" config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/trace/noop" @@ -28,7 +28,7 @@ import ( func TestInstallingNewTracerProvider(t *testing.T) { tpBefore := otel.GetTracerProvider() - m := NewManager(log.NewNopLogger()) + m := NewManager(promslog.NewNopLogger()) cfg := config.Config{ TracingConfig: config.TracingConfig{ Endpoint: "localhost:1234", @@ -41,7 +41,7 @@ func TestInstallingNewTracerProvider(t *testing.T) { } func TestReinstallingTracerProvider(t *testing.T) { - m := NewManager(log.NewNopLogger()) + m := NewManager(promslog.NewNopLogger()) cfg := config.Config{ TracingConfig: config.TracingConfig{ Endpoint: "localhost:1234", @@ -76,7 +76,7 @@ func TestReinstallingTracerProvider(t *testing.T) { } func TestReinstallingTracerProviderWithTLS(t *testing.T) { - m := NewManager(log.NewNopLogger()) + m := NewManager(promslog.NewNopLogger()) cfg := config.Config{ TracingConfig: config.TracingConfig{ Endpoint: "localhost:1234", @@ -96,7 +96,7 @@ func TestReinstallingTracerProviderWithTLS(t *testing.T) { } func TestUninstallingTracerProvider(t *testing.T) { - m := NewManager(log.NewNopLogger()) + m := NewManager(promslog.NewNopLogger()) cfg := config.Config{ TracingConfig: config.TracingConfig{ Endpoint: "localhost:1234", @@ -118,7 +118,7 @@ func TestUninstallingTracerProvider(t *testing.T) { } func TestTracerProviderShutdown(t *testing.T) { - m := NewManager(log.NewNopLogger()) + m := NewManager(promslog.NewNopLogger()) cfg := config.Config{ TracingConfig: config.TracingConfig{ Endpoint: "localhost:1234", diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index 9697739e00b..3863e6cd998 100644 --- a/tsdb/agent/db.go +++ 
b/tsdb/agent/db.go @@ -17,14 +17,13 @@ import ( "context" "errors" "fmt" + "log/slog" "math" "path/filepath" "sync" "time" "unicode/utf8" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "go.uber.org/atomic" @@ -226,7 +225,7 @@ func (m *dbMetrics) Unregister() { // DB represents a WAL-only storage. It implements storage.DB. type DB struct { mtx sync.RWMutex - logger log.Logger + logger *slog.Logger opts *Options rs *remote.Storage @@ -251,7 +250,7 @@ type DB struct { } // Open returns a new agent.DB in the given directory. -func Open(l log.Logger, reg prometheus.Registerer, rs *remote.Storage, dir string, opts *Options) (*DB, error) { +func Open(l *slog.Logger, reg prometheus.Registerer, rs *remote.Storage, dir string, opts *Options) (*DB, error) { opts = validateOptions(opts) locker, err := tsdbutil.NewDirLocker(dir, "agent", l, reg) @@ -306,11 +305,11 @@ func Open(l log.Logger, reg prometheus.Registerer, rs *remote.Storage, dir strin } if err := db.replayWAL(); err != nil { - level.Warn(db.logger).Log("msg", "encountered WAL read error, attempting repair", "err", err) + db.logger.Warn("encountered WAL read error, attempting repair", "err", err) if err := w.Repair(err); err != nil { return nil, fmt.Errorf("repair corrupted WAL: %w", err) } - level.Info(db.logger).Log("msg", "successfully repaired WAL") + db.logger.Info("successfully repaired WAL") } go db.run() @@ -335,7 +334,7 @@ func validateOptions(opts *Options) *Options { opts.WALCompression = wlog.CompressionNone } - // Revert Stripesize to DefaultStripsize if Stripsize is either 0 or not a power of 2. + // Revert StripeSize to DefaultStripeSize if StripeSize is either 0 or not a power of 2. if opts.StripeSize <= 0 || ((opts.StripeSize & (opts.StripeSize - 1)) != 0) { opts.StripeSize = tsdb.DefaultStripeSize } @@ -359,7 +358,7 @@ func validateOptions(opts *Options) *Options { } func (db *DB) replayWAL() error { - level.Info(db.logger).Log("msg", "replaying WAL, this may take a while", "dir", db.wal.Dir()) + db.logger.Info("replaying WAL, this may take a while", "dir", db.wal.Dir()) start := time.Now() dir, startFrom, err := wlog.LastCheckpoint(db.wal.Dir()) @@ -376,7 +375,7 @@ func (db *DB) replayWAL() error { } defer func() { if err := sr.Close(); err != nil { - level.Warn(db.logger).Log("msg", "error while closing the wal segments reader", "err", err) + db.logger.Warn("error while closing the wal segments reader", "err", err) } }() @@ -386,7 +385,7 @@ func (db *DB) replayWAL() error { return fmt.Errorf("backfill checkpoint: %w", err) } startFrom++ - level.Info(db.logger).Log("msg", "WAL checkpoint loaded") + db.logger.Info("WAL checkpoint loaded") } // Find the last segment. @@ -395,7 +394,7 @@ func (db *DB) replayWAL() error { return fmt.Errorf("finding WAL segments: %w", err) } - // Backfil segments from the most recent checkpoint onwards. + // Backfill segments from the most recent checkpoint onwards. 
for i := startFrom; i <= last; i++ { seg, err := wlog.OpenReadSegment(wlog.SegmentName(db.wal.Dir(), i)) if err != nil { @@ -405,12 +404,12 @@ func (db *DB) replayWAL() error { sr := wlog.NewSegmentBufReader(seg) err = db.loadWAL(wlog.NewReader(sr), multiRef) if err := sr.Close(); err != nil { - level.Warn(db.logger).Log("msg", "error while closing the wal segments reader", "err", err) + db.logger.Warn("error while closing the wal segments reader", "err", err) } if err != nil { return err } - level.Info(db.logger).Log("msg", "WAL segment loaded", "segment", i, "maxSegment", last) + db.logger.Info("WAL segment loaded", "segment", i, "maxSegment", last) } walReplayDuration := time.Since(start) @@ -571,7 +570,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H } if v := nonExistentSeriesRefs.Load(); v > 0 { - level.Warn(db.logger).Log("msg", "found sample referencing non-existing series", "skipped_series", v) + db.logger.Warn("found sample referencing non-existing series", "skipped_series", v) } db.nextRef.Store(uint64(lastRef)) @@ -616,9 +615,9 @@ Loop: ts = maxTS } - level.Debug(db.logger).Log("msg", "truncating the WAL", "ts", ts) + db.logger.Debug("truncating the WAL", "ts", ts) if err := db.truncate(ts); err != nil { - level.Warn(db.logger).Log("msg", "failed to truncate WAL", "err", err) + db.logger.Warn("failed to truncate WAL", "err", err) } } } @@ -631,7 +630,7 @@ func (db *DB) truncate(mint int64) error { start := time.Now() db.gc(mint) - level.Info(db.logger).Log("msg", "series GC completed", "duration", time.Since(start)) + db.logger.Info("series GC completed", "duration", time.Since(start)) first, last, err := wlog.Segments(db.wal.Dir()) if err != nil { @@ -679,7 +678,7 @@ func (db *DB) truncate(mint int64) error { // If truncating fails, we'll just try it again at the next checkpoint. // Leftover segments will still just be ignored in the future if there's a // checkpoint that supersedes them. - level.Error(db.logger).Log("msg", "truncating segments failed", "err", err) + db.logger.Error("truncating segments failed", "err", err) } // The checkpoint is written and segments before it are truncated, so we @@ -696,13 +695,13 @@ func (db *DB) truncate(mint int64) error { // Leftover old checkpoints do not cause problems down the line beyond // occupying disk space. They will just be ignored since a newer checkpoint // exists. - level.Error(db.logger).Log("msg", "delete old checkpoints", "err", err) + db.logger.Error("delete old checkpoints", "err", err) db.metrics.checkpointDeleteFail.Inc() } db.metrics.walTruncateDuration.Observe(time.Since(start).Seconds()) - level.Info(db.logger).Log("msg", "WAL checkpoint complete", "first", first, "last", last, "duration", time.Since(start)) + db.logger.Info("WAL checkpoint complete", "first", first, "last", last, "duration", time.Since(start)) return nil } @@ -764,6 +763,7 @@ func (db *DB) Close() error { type appender struct { *DB + hints *storage.AppendOptions pendingSeries []record.RefSeries pendingSamples []record.RefSample @@ -784,6 +784,10 @@ type appender struct { floatHistogramSeries []*memSeries } +func (a *appender) SetOptions(opts *storage.AppendOptions) { + a.hints = opts +} + func (a *appender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { // series references and chunk references are identical for agent mode. 
headRef := chunks.HeadSeriesRef(ref) @@ -977,9 +981,134 @@ func (a *appender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Met return 0, nil } -func (a *appender) AppendCTZeroSample(storage.SeriesRef, labels.Labels, int64, int64) (storage.SeriesRef, error) { - // TODO(bwplotka): Wire metadata in the Agent's appender. - return 0, nil +func (a *appender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + if h != nil { + if err := h.Validate(); err != nil { + return 0, err + } + } + if fh != nil { + if err := fh.Validate(); err != nil { + return 0, err + } + } + if ct >= t { + return 0, storage.ErrCTNewerThanSample + } + + series := a.series.GetByID(chunks.HeadSeriesRef(ref)) + if series == nil { + // Ensure no empty labels have gotten through. + l = l.WithoutEmpty() + if l.IsEmpty() { + return 0, fmt.Errorf("empty labelset: %w", tsdb.ErrInvalidSample) + } + + if lbl, dup := l.HasDuplicateLabelNames(); dup { + return 0, fmt.Errorf(`label name "%s" is not unique: %w`, lbl, tsdb.ErrInvalidSample) + } + + var created bool + series, created = a.getOrCreate(l) + if created { + a.pendingSeries = append(a.pendingSeries, record.RefSeries{ + Ref: series.ref, + Labels: l, + }) + a.metrics.numActiveSeries.Inc() + } + } + + series.Lock() + defer series.Unlock() + + if ct <= a.minValidTime(series.lastTs) { + return 0, storage.ErrOutOfOrderCT + } + + if ct > series.lastTs { + series.lastTs = ct + } else { + // discard the sample if it's out of order. + return 0, storage.ErrOutOfOrderCT + } + + switch { + case h != nil: + zeroHistogram := &histogram.Histogram{} + a.pendingHistograms = append(a.pendingHistograms, record.RefHistogramSample{ + Ref: series.ref, + T: ct, + H: zeroHistogram, + }) + a.histogramSeries = append(a.histogramSeries, series) + case fh != nil: + a.pendingFloatHistograms = append(a.pendingFloatHistograms, record.RefFloatHistogramSample{ + Ref: series.ref, + T: ct, + FH: &histogram.FloatHistogram{}, + }) + a.floatHistogramSeries = append(a.floatHistogramSeries, series) + } + + a.metrics.totalAppendedSamples.WithLabelValues(sampleMetricTypeHistogram).Inc() + return storage.SeriesRef(series.ref), nil +} + +func (a *appender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) { + if ct >= t { + return 0, storage.ErrCTNewerThanSample + } + + series := a.series.GetByID(chunks.HeadSeriesRef(ref)) + if series == nil { + l = l.WithoutEmpty() + if l.IsEmpty() { + return 0, fmt.Errorf("empty labelset: %w", tsdb.ErrInvalidSample) + } + + if lbl, dup := l.HasDuplicateLabelNames(); dup { + return 0, fmt.Errorf(`label name "%s" is not unique: %w`, lbl, tsdb.ErrInvalidSample) + } + + newSeries, created := a.getOrCreate(l) + if created { + a.pendingSeries = append(a.pendingSeries, record.RefSeries{ + Ref: newSeries.ref, + Labels: l, + }) + a.metrics.numActiveSeries.Inc() + } + + series = newSeries + } + + series.Lock() + defer series.Unlock() + + if t <= a.minValidTime(series.lastTs) { + a.metrics.totalOutOfOrderSamples.Inc() + return 0, storage.ErrOutOfOrderSample + } + + if ct > series.lastTs { + series.lastTs = ct + } else { + // discard the sample if it's out of order. + return 0, storage.ErrOutOfOrderCT + } + + // NOTE: always modify pendingSamples and sampleSeries together. 
+ a.pendingSamples = append(a.pendingSamples, record.RefSample{ + Ref: series.ref, + T: ct, + V: 0, + }) + a.sampleSeries = append(a.sampleSeries, series) + + a.metrics.totalAppendedSamples.WithLabelValues(sampleMetricTypeFloat).Inc() + + return storage.SeriesRef(series.ref), nil } // Commit submits the collected samples and purges the batch. diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go index b31041b1b97..b28c29095c2 100644 --- a/tsdb/agent/db_test.go +++ b/tsdb/agent/db_test.go @@ -15,21 +15,23 @@ package agent import ( "context" + "errors" "fmt" + "io" "math" "path/filepath" "strconv" "testing" "time" - "github.com/go-kit/log" - "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage/remote" @@ -89,12 +91,12 @@ func createTestAgentDB(t testing.TB, reg prometheus.Registerer, opts *Options) * t.Helper() dbDir := t.TempDir() - rs := remote.NewStorage(log.NewNopLogger(), reg, startTime, dbDir, time.Second*30, nil, false) + rs := remote.NewStorage(promslog.NewNopLogger(), reg, startTime, dbDir, time.Second*30, nil, false) t.Cleanup(func() { require.NoError(t, rs.Close()) }) - db, err := Open(log.NewNopLogger(), reg, rs, dbDir, opts) + db, err := Open(promslog.NewNopLogger(), reg, rs, dbDir, opts) require.NoError(t, err) return db } @@ -583,7 +585,7 @@ func TestWALReplay(t *testing.T) { func TestLockfile(t *testing.T) { tsdbutil.TestDirLockerUsage(t, func(t *testing.T, data string, createLock bool) (*tsdbutil.DirLocker, testutil.Closer) { - logger := log.NewNopLogger() + logger := promslog.NewNopLogger() reg := prometheus.NewRegistry() rs := remote.NewStorage(logger, reg, startTime, data, time.Second*30, nil, false) t.Cleanup(func() { @@ -605,12 +607,12 @@ func TestLockfile(t *testing.T) { func Test_ExistingWAL_NextRef(t *testing.T) { dbDir := t.TempDir() - rs := remote.NewStorage(log.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil, false) + rs := remote.NewStorage(promslog.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil, false) defer func() { require.NoError(t, rs.Close()) }() - db, err := Open(log.NewNopLogger(), nil, rs, dbDir, DefaultOptions()) + db, err := Open(promslog.NewNopLogger(), nil, rs, dbDir, DefaultOptions()) require.NoError(t, err) seriesCount := 10 @@ -638,9 +640,11 @@ func Test_ExistingWAL_NextRef(t *testing.T) { require.NoError(t, db.Close()) // Create a new storage and see what nextRef is initialized to. 
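The created-timestamp appenders above are meant to be used the way the test later in this file exercises them: emit a synthetic zero sample at the created timestamp, tolerate storage.ErrOutOfOrderCT when that CT was already written, then append the real sample. A hedged sketch (appendWithCT and its arguments are illustrative):

package ctingest

import (
	"errors"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
)

// appendWithCT writes one float sample preceded by its created-timestamp zero
// sample. ct must be strictly older than t, otherwise AppendCTZeroSample
// returns storage.ErrCTNewerThanSample; a ct that is not newer than the
// series' last timestamp surfaces as storage.ErrOutOfOrderCT and is skipped.
func appendWithCT(app storage.Appender, lset labels.Labels, t, ct int64, v float64) error {
	ref, err := app.AppendCTZeroSample(0, lset, t, ct)
	if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) {
		return err
	}
	if _, err := app.Append(ref, lset, t, v); err != nil {
		return err
	}
	return nil
}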
- db, err = Open(log.NewNopLogger(), nil, rs, dbDir, DefaultOptions()) + db, err = Open(promslog.NewNopLogger(), nil, rs, dbDir, DefaultOptions()) require.NoError(t, err) - defer require.NoError(t, db.Close()) + defer func() { + require.NoError(t, db.Close()) + }() require.Equal(t, uint64(seriesCount+histogramCount), db.nextRef.Load(), "nextRef should be equal to the number of series written across the entire WAL") } @@ -932,6 +936,249 @@ func TestDBOutOfOrderTimeWindow(t *testing.T) { } } +type walSample struct { + t int64 + f float64 + h *histogram.Histogram + lbls labels.Labels + ref storage.SeriesRef +} + +func TestDBCreatedTimestampSamplesIngestion(t *testing.T) { + t.Parallel() + + type appendableSample struct { + t int64 + ct int64 + v float64 + lbls labels.Labels + h *histogram.Histogram + expectsError bool + } + + testHistogram := tsdbutil.GenerateTestHistograms(1)[0] + zeroHistogram := &histogram.Histogram{} + + lbls := labelsForTest(t.Name(), 1) + defLbls := labels.New(lbls[0]...) + + testCases := []struct { + name string + inputSamples []appendableSample + expectedSamples []*walSample + expectedSeriesCount int + }{ + { + name: "in order ct+normal sample/floatSamples", + inputSamples: []appendableSample{ + {t: 100, ct: 1, v: 10, lbls: defLbls}, + {t: 101, ct: 1, v: 10, lbls: defLbls}, + }, + expectedSamples: []*walSample{ + {t: 1, f: 0, lbls: defLbls}, + {t: 100, f: 10, lbls: defLbls}, + {t: 101, f: 10, lbls: defLbls}, + }, + }, + { + name: "CT+float && CT+histogram samples", + inputSamples: []appendableSample{ + { + t: 100, + ct: 30, + v: 20, + lbls: defLbls, + }, + { + t: 300, + ct: 230, + h: testHistogram, + lbls: defLbls, + }, + }, + expectedSamples: []*walSample{ + {t: 30, f: 0, lbls: defLbls}, + {t: 100, f: 20, lbls: defLbls}, + {t: 230, h: zeroHistogram, lbls: defLbls}, + {t: 300, h: testHistogram, lbls: defLbls}, + }, + expectedSeriesCount: 1, + }, + { + name: "CT+float && CT+histogram samples with error", + inputSamples: []appendableSample{ + { + // invalid CT + t: 100, + ct: 100, + v: 10, + lbls: defLbls, + expectsError: true, + }, + { + // invalid CT histogram + t: 300, + ct: 300, + h: testHistogram, + lbls: defLbls, + expectsError: true, + }, + }, + expectedSamples: []*walSample{ + {t: 100, f: 10, lbls: defLbls}, + {t: 300, h: testHistogram, lbls: defLbls}, + }, + expectedSeriesCount: 0, + }, + { + name: "In order ct+normal sample/histogram", + inputSamples: []appendableSample{ + {t: 100, h: testHistogram, ct: 1, lbls: defLbls}, + {t: 101, h: testHistogram, ct: 1, lbls: defLbls}, + }, + expectedSamples: []*walSample{ + {t: 1, h: &histogram.Histogram{}}, + {t: 100, h: testHistogram}, + {t: 101, h: &histogram.Histogram{CounterResetHint: histogram.NotCounterReset}}, + }, + }, + { + name: "ct+normal then OOO sample/float", + inputSamples: []appendableSample{ + {t: 60_000, ct: 40_000, v: 10, lbls: defLbls}, + {t: 120_000, ct: 40_000, v: 10, lbls: defLbls}, + {t: 180_000, ct: 40_000, v: 10, lbls: defLbls}, + {t: 50_000, ct: 40_000, v: 10, lbls: defLbls}, + }, + expectedSamples: []*walSample{ + {t: 40_000, f: 0, lbls: defLbls}, + {t: 50_000, f: 10, lbls: defLbls}, + {t: 60_000, f: 10, lbls: defLbls}, + {t: 120_000, f: 10, lbls: defLbls}, + {t: 180_000, f: 10, lbls: defLbls}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + reg := prometheus.NewRegistry() + opts := DefaultOptions() + opts.OutOfOrderTimeWindow = 360_000 + s := createTestAgentDB(t, reg, opts) + app := s.Appender(context.TODO()) + + for _, sample := range 
tc.inputSamples { + // We supposed to write a Histogram to the WAL + if sample.h != nil { + _, err := app.AppendHistogramCTZeroSample(0, sample.lbls, sample.t, sample.ct, zeroHistogram, nil) + if !errors.Is(err, storage.ErrOutOfOrderCT) { + require.Equal(t, sample.expectsError, err != nil, "expected error: %v, got: %v", sample.expectsError, err) + } + + _, err = app.AppendHistogram(0, sample.lbls, sample.t, sample.h, nil) + require.NoError(t, err) + } else { + // We supposed to write a float sample to the WAL + _, err := app.AppendCTZeroSample(0, sample.lbls, sample.t, sample.ct) + if !errors.Is(err, storage.ErrOutOfOrderCT) { + require.Equal(t, sample.expectsError, err != nil, "expected error: %v, got: %v", sample.expectsError, err) + } + + _, err = app.Append(0, sample.lbls, sample.t, sample.v) + require.NoError(t, err) + } + } + + require.NoError(t, app.Commit()) + // Close the DB to ensure all data is flushed to the WAL + require.NoError(t, s.Close()) + + // Check that we dont have any OOO samples in the WAL by checking metrics + families, err := reg.Gather() + require.NoError(t, err, "failed to gather metrics") + for _, f := range families { + if f.GetName() == "prometheus_agent_out_of_order_samples_total" { + t.Fatalf("unexpected metric %s", f.GetName()) + } + } + + outputSamples := readWALSamples(t, s.wal.Dir()) + + require.Equal(t, len(tc.expectedSamples), len(outputSamples), "Expected %d samples", len(tc.expectedSamples)) + + for i, expectedSample := range tc.expectedSamples { + for _, sample := range outputSamples { + if sample.t == expectedSample.t && sample.lbls.String() == expectedSample.lbls.String() { + if expectedSample.h != nil { + require.Equal(t, expectedSample.h, sample.h, "histogram value mismatch (sample index %d)", i) + } else { + require.Equal(t, expectedSample.f, sample.f, "value mismatch (sample index %d)", i) + } + } + } + } + }) + } +} + +func readWALSamples(t *testing.T, walDir string) []*walSample { + t.Helper() + sr, err := wlog.NewSegmentsReader(walDir) + require.NoError(t, err) + defer func(sr io.ReadCloser) { + err := sr.Close() + require.NoError(t, err) + }(sr) + + r := wlog.NewReader(sr) + dec := record.NewDecoder(labels.NewSymbolTable()) + + var ( + samples []record.RefSample + histograms []record.RefHistogramSample + + lastSeries record.RefSeries + outputSamples = make([]*walSample, 0) + ) + + for r.Next() { + rec := r.Record() + switch dec.Type(rec) { + case record.Series: + series, err := dec.Series(rec, nil) + require.NoError(t, err) + lastSeries = series[0] + case record.Samples: + samples, err = dec.Samples(rec, samples[:0]) + require.NoError(t, err) + for _, s := range samples { + outputSamples = append(outputSamples, &walSample{ + t: s.T, + f: s.V, + lbls: lastSeries.Labels.Copy(), + ref: storage.SeriesRef(lastSeries.Ref), + }) + } + case record.HistogramSamples: + histograms, err = dec.HistogramSamples(rec, histograms[:0]) + require.NoError(t, err) + for _, h := range histograms { + outputSamples = append(outputSamples, &walSample{ + t: h.T, + h: h.H, + lbls: lastSeries.Labels.Copy(), + ref: storage.SeriesRef(lastSeries.Ref), + }) + } + } + } + + return outputSamples +} + func BenchmarkCreateSeries(b *testing.B) { s := createTestAgentDB(b, nil, DefaultOptions()) defer s.Close() diff --git a/tsdb/block.go b/tsdb/block.go index 2f32733f8c4..48ba4588aaa 100644 --- a/tsdb/block.go +++ b/tsdb/block.go @@ -20,15 +20,16 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "path/filepath" "slices" "sync" - "github.com/go-kit/log" - 
"github.com/go-kit/log/level" "github.com/oklog/ulid" + "github.com/prometheus/common/promslog" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -265,7 +266,7 @@ func readMetaFile(dir string) (*BlockMeta, int64, error) { return &m, int64(len(b)), nil } -func writeMetaFile(logger log.Logger, dir string, meta *BlockMeta) (int64, error) { +func writeMetaFile(logger *slog.Logger, dir string, meta *BlockMeta) (int64, error) { meta.Version = metaVersion1 // Make any changes to the file appear atomic. @@ -273,7 +274,7 @@ func writeMetaFile(logger log.Logger, dir string, meta *BlockMeta) (int64, error tmp := path + ".tmp" defer func() { if err := os.RemoveAll(tmp); err != nil { - level.Error(logger).Log("msg", "remove tmp file", "err", err.Error()) + logger.Error("remove tmp file", "err", err.Error()) } }() @@ -319,7 +320,7 @@ type Block struct { indexr IndexReader tombstones tombstones.Reader - logger log.Logger + logger *slog.Logger numBytesChunks int64 numBytesIndex int64 @@ -329,9 +330,9 @@ type Block struct { // OpenBlock opens the block in the directory. It can be passed a chunk pool, which is used // to instantiate chunk structs. -func OpenBlock(logger log.Logger, dir string, pool chunkenc.Pool) (pb *Block, err error) { +func OpenBlock(logger *slog.Logger, dir string, pool chunkenc.Pool) (pb *Block, err error) { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } var closers []io.Closer defer func() { diff --git a/tsdb/block_test.go b/tsdb/block_test.go index f2569e35be5..3589b42c17f 100644 --- a/tsdb/block_test.go +++ b/tsdb/block_test.go @@ -22,12 +22,13 @@ import ( "math/rand" "os" "path/filepath" + "slices" "sort" "strconv" "testing" - "github.com/go-kit/log" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/histogram" @@ -46,7 +47,7 @@ import ( func TestBlockMetaMustNeverBeVersion2(t *testing.T) { dir := t.TempDir() - _, err := writeMetaFile(log.NewNopLogger(), dir, &BlockMeta{}) + _, err := writeMetaFile(promslog.NewNopLogger(), dir, &BlockMeta{}) require.NoError(t, err) meta, _, err := readMetaFile(dir) @@ -151,7 +152,7 @@ func TestCorruptedChunk(t *testing.T) { require.NoError(t, err) require.NoError(t, f.Truncate(fi.Size()-1)) }, - iterErr: errors.New("cannot populate chunk 8 from block 00000000000000000000000000: segment doesn't include enough bytes to read the chunk - required:26, available:25"), + iterErr: errors.New("cannot populate chunk 8 from block 00000000000000000000000000: segment doesn't include enough bytes to read the chunk - required:25, available:24"), }, { name: "checksum mismatch", @@ -169,7 +170,7 @@ func TestCorruptedChunk(t *testing.T) { require.NoError(t, err) require.Equal(t, 1, n) }, - iterErr: errors.New("cannot populate chunk 8 from block 00000000000000000000000000: checksum mismatch expected:cfc0526c, actual:34815eae"), + iterErr: errors.New("cannot populate chunk 8 from block 00000000000000000000000000: checksum mismatch expected:231bddcf, actual:d85ad10d"), }, } { t.Run(tc.name, func(t *testing.T) { @@ -191,7 +192,7 @@ func TestCorruptedChunk(t *testing.T) { // Check open err. 
b, err := OpenBlock(nil, blockDir, nil) if tc.openErr != nil { - require.Equal(t, tc.openErr.Error(), err.Error()) + require.EqualError(t, err, tc.openErr.Error()) return } defer func() { require.NoError(t, b.Close()) }() @@ -205,7 +206,7 @@ func TestCorruptedChunk(t *testing.T) { require.True(t, set.Next()) it := set.At().Iterator(nil) require.Equal(t, chunkenc.ValNone, it.Next()) - require.Equal(t, tc.iterErr.Error(), it.Err().Error()) + require.EqualError(t, it.Err(), tc.iterErr.Error()) }) } } @@ -310,6 +311,33 @@ func TestLabelValuesWithMatchers(t *testing.T) { } } +func TestBlockQuerierReturnsSortedLabelValues(t *testing.T) { + tmpdir := t.TempDir() + ctx := context.Background() + + var seriesEntries []storage.Series + for i := 100; i > 0; i-- { + seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings( + "__name__", fmt.Sprintf("value%d", i), + ), []chunks.Sample{sample{100, 0, nil, nil}})) + } + + blockDir := createBlock(t, tmpdir, seriesEntries) + + // Check open err. + block, err := OpenBlock(nil, blockDir, nil) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, block.Close()) }) + + q, err := newBlockBaseQuerier(block, 0, 100) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, q.Close()) }) + + res, _, err := q.LabelValues(ctx, "__name__", nil) + require.NoError(t, err) + require.True(t, slices.IsSorted(res)) +} + // TestBlockSize ensures that the block size is calculated correctly. func TestBlockSize(t *testing.T) { tmpdir := t.TempDir() @@ -344,7 +372,7 @@ func TestBlockSize(t *testing.T) { require.NoError(t, err) require.Equal(t, expAfterDelete, actAfterDelete, "after a delete reported block size doesn't match actual disk size") - c, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{0}, nil, nil) + c, err := NewLeveledCompactor(context.Background(), nil, promslog.NewNopLogger(), []int64{0}, nil, nil) require.NoError(t, err) blockDirsAfterCompact, err := c.Compact(tmpdir, []string{blockInit.Dir()}, nil) require.NoError(t, err) @@ -593,13 +621,13 @@ func testPostingsForLabelMatching(t *testing.T, offset storage.SeriesRef, setUp // createBlock creates a block with given set of series and returns its dir. 
func createBlock(tb testing.TB, dir string, series []storage.Series) string { - blockDir, err := CreateBlock(series, dir, 0, log.NewNopLogger()) + blockDir, err := CreateBlock(series, dir, 0, promslog.NewNopLogger()) require.NoError(tb, err) return blockDir } func createBlockFromHead(tb testing.TB, dir string, head *Head) string { - compactor, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{1000000}, nil, nil) + compactor, err := NewLeveledCompactor(context.Background(), nil, promslog.NewNopLogger(), []int64{1000000}, nil, nil) require.NoError(tb, err) require.NoError(tb, os.MkdirAll(dir, 0o777)) @@ -613,7 +641,7 @@ func createBlockFromHead(tb testing.TB, dir string, head *Head) string { } func createBlockFromOOOHead(tb testing.TB, dir string, head *OOOCompactionHead) string { - compactor, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{1000000}, nil, nil) + compactor, err := NewLeveledCompactor(context.Background(), nil, promslog.NewNopLogger(), []int64{1000000}, nil, nil) require.NoError(tb, err) require.NoError(tb, os.MkdirAll(dir, 0o777)) diff --git a/tsdb/blockwriter.go b/tsdb/blockwriter.go index 232ec2b9148..63f82e28df0 100644 --- a/tsdb/blockwriter.go +++ b/tsdb/blockwriter.go @@ -17,11 +17,10 @@ import ( "context" "errors" "fmt" + "log/slog" "math" "os" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/prometheus/prometheus/model/timestamp" @@ -31,7 +30,7 @@ import ( // BlockWriter is a block writer that allows appending and flushing series to disk. type BlockWriter struct { - logger log.Logger + logger *slog.Logger destinationDir string head *Head @@ -50,7 +49,7 @@ var ErrNoSeriesAppended = errors.New("no series appended, aborting") // contains anything at all. It is the caller's responsibility to // ensure that the resulting blocks do not overlap etc. // Writer ensures the block flush is atomic (via rename). -func NewBlockWriter(logger log.Logger, dir string, blockSize int64) (*BlockWriter, error) { +func NewBlockWriter(logger *slog.Logger, dir string, blockSize int64) (*BlockWriter, error) { w := &BlockWriter{ logger: logger, destinationDir: dir, @@ -95,7 +94,7 @@ func (w *BlockWriter) Flush(ctx context.Context) (ulid.ULID, error) { // Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime). // Because of this block intervals are always +1 than the total samples it includes. 
maxt := w.head.MaxTime() + 1 - level.Info(w.logger).Log("msg", "flushing", "series_count", w.head.NumSeries(), "mint", timestamp.Time(mint), "maxt", timestamp.Time(maxt)) + w.logger.Info("flushing", "series_count", w.head.NumSeries(), "mint", timestamp.Time(mint), "maxt", timestamp.Time(maxt)) compactor, err := NewLeveledCompactor(ctx, nil, @@ -121,7 +120,7 @@ func (w *BlockWriter) Flush(ctx context.Context) (ulid.ULID, error) { func (w *BlockWriter) Close() error { defer func() { if err := os.RemoveAll(w.chunkDir); err != nil { - level.Error(w.logger).Log("msg", "error in deleting BlockWriter files", "err", err) + w.logger.Error("error in deleting BlockWriter files", "err", err) } }() return w.head.Close() diff --git a/tsdb/blockwriter_test.go b/tsdb/blockwriter_test.go index d8240b53c6d..4ec25df70a4 100644 --- a/tsdb/blockwriter_test.go +++ b/tsdb/blockwriter_test.go @@ -19,9 +19,10 @@ import ( "path/filepath" "testing" - "github.com/go-kit/log" "github.com/stretchr/testify/require" + "github.com/prometheus/common/promslog" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunks" ) @@ -29,7 +30,7 @@ import ( func TestBlockWriter(t *testing.T) { ctx := context.Background() outputDir := t.TempDir() - w, err := NewBlockWriter(log.NewNopLogger(), outputDir, DefaultBlockDuration) + w, err := NewBlockWriter(promslog.NewNopLogger(), outputDir, DefaultBlockDuration) require.NoError(t, err) // Add some series. diff --git a/tsdb/chunkenc/bstream.go b/tsdb/chunkenc/bstream.go index 8cc59f3ea76..6e01798f720 100644 --- a/tsdb/chunkenc/bstream.go +++ b/tsdb/chunkenc/bstream.go @@ -86,8 +86,8 @@ func (b *bstream) writeBit(bit bit) { func (b *bstream) writeByte(byt byte) { if b.count == 0 { - b.stream = append(b.stream, 0) - b.count = 8 + b.stream = append(b.stream, byt) + return } i := len(b.stream) - 1 @@ -95,10 +95,8 @@ func (b *bstream) writeByte(byt byte) { // Complete the last byte with the leftmost b.count bits from byt. b.stream[i] |= byt >> (8 - b.count) - b.stream = append(b.stream, 0) - i++ // Write the remainder, if any. - b.stream[i] = byt << b.count + b.stream = append(b.stream, byt< 250 { - break - } a.Append(p.t, p.v) - i++ - j++ } - chunks = append(chunks, c) } - - fmt.Println("num", b.N, "created chunks", len(chunks)) } diff --git a/tsdb/chunkenc/histogram_meta_test.go b/tsdb/chunkenc/histogram_meta_test.go index fdbd1825aae..1774dee8673 100644 --- a/tsdb/chunkenc/histogram_meta_test.go +++ b/tsdb/chunkenc/histogram_meta_test.go @@ -14,7 +14,7 @@ // The code in this file was largely written by Damian Gryski as part of // https://github.com/dgryski/go-tsz and published under the license below. // It was modified to accommodate reading from byte slices without modifying -// the underlying bytes, which would panic when reading from mmap'd +// the underlying bytes, which would panic when reading from mmapped // read-only byte slices. package chunkenc diff --git a/tsdb/chunkenc/xor.go b/tsdb/chunkenc/xor.go index 3177762f816..ac75a5994bb 100644 --- a/tsdb/chunkenc/xor.go +++ b/tsdb/chunkenc/xor.go @@ -14,7 +14,7 @@ // The code in this file was largely written by Damian Gryski as part of // https://github.com/dgryski/go-tsz and published under the license below. // It was modified to accommodate reading from byte slices without modifying -// the underlying bytes, which would panic when reading from mmap'd +// the underlying bytes, which would panic when reading from mmapped // read-only byte slices. 
// Copyright (c) 2015,2016 Damian Gryski @@ -191,8 +191,8 @@ func (a *xorAppender) Append(t int64, v float64) { case dod == 0: a.b.writeBit(zero) case bitRange(dod, 14): - a.b.writeBits(0b10, 2) - a.b.writeBits(uint64(dod), 14) + a.b.writeByte(0b10<<6 | (uint8(dod>>8) & (1<<6 - 1))) // 0b10 size code combined with 6 bits of dod. + a.b.writeByte(uint8(dod)) // Bottom 8 bits of dod. case bitRange(dod, 17): a.b.writeBits(0b110, 3) a.b.writeBits(uint64(dod), 17) diff --git a/tsdb/chunks/chunk_write_queue.go b/tsdb/chunks/chunk_write_queue.go index 6d2dc743b05..ba9730d9369 100644 --- a/tsdb/chunks/chunk_write_queue.go +++ b/tsdb/chunks/chunk_write_queue.go @@ -24,7 +24,7 @@ import ( ) const ( - // Minimum recorded peak since the last shrinking of chunkWriteQueue.chunkrefMap to shrink it again. + // Minimum recorded peak since the last shrinking of chunkWriteQueue.chunkRefMap to shrink it again. chunkRefMapShrinkThreshold = 1000 // Minimum interval between shrinking of chunkWriteQueue.chunkRefMap. diff --git a/tsdb/chunks/samples.go b/tsdb/chunks/samples.go index 638660c70c6..a5b16094df4 100644 --- a/tsdb/chunks/samples.go +++ b/tsdb/chunks/samples.go @@ -29,6 +29,7 @@ type Sample interface { H() *histogram.Histogram FH() *histogram.FloatHistogram Type() chunkenc.ValueType + Copy() Sample // Returns a deep copy. } type SampleSlice []Sample @@ -70,6 +71,17 @@ func (s sample) Type() chunkenc.ValueType { } } +func (s sample) Copy() Sample { + c := sample{t: s.t, f: s.f} + if s.h != nil { + c.h = s.h.Copy() + } + if s.fh != nil { + c.fh = s.fh.Copy() + } + return c +} + // GenerateSamples starting at start and counting up numSamples. func GenerateSamples(start, numSamples int) []Sample { return generateSamples(start, numSamples, func(i int) Sample { diff --git a/tsdb/compact.go b/tsdb/compact.go index 9ef42b339b7..ff35679e3ff 100644 --- a/tsdb/compact.go +++ b/tsdb/compact.go @@ -19,15 +19,15 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "path/filepath" "slices" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -80,7 +80,7 @@ type Compactor interface { // LeveledCompactor implements the Compactor interface. 
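Earlier in this hunk, the xorAppender's 14-bit delta-of-delta case switches from two writeBits calls to two writeByte calls: the first byte carries the 0b10 size code plus the top 6 bits of the two's-complement dod, the second carries the low 8 bits, so the same 16 bits reach the stream through the cheaper byte path added to bstream.go above. A standalone sketch of that packing and of how a value round-trips; it is illustrative only and not the patch's test code:

package main

import "fmt"

// packDod14 mirrors the two writeByte arguments from the patch for the
// 14-bit delta-of-delta case: 0b10 prefix plus the top 6 bits, then the
// low 8 bits.
func packDod14(dod int64) (byte, byte) {
	hi := byte(0b10<<6 | (uint8(dod>>8) & (1<<6 - 1)))
	lo := uint8(dod)
	return hi, lo
}

func main() {
	for _, dod := range []int64{0, 1, -1, 8191, -8192} {
		hi, lo := packDod14(dod)
		// Decode: drop the 2-bit prefix, reassemble the 14 bits, then
		// sign-extend from bit 13 (two's complement).
		v := int64(hi&0x3f)<<8 | int64(lo)
		if v >= 1<<13 {
			v -= 1 << 14
		}
		fmt.Printf("dod=%6d -> bytes %08b %08b -> decoded %6d\n", dod, hi, lo, v)
	}
}

The sign extension from bit 13 is what lets negative deltas such as -8192 come back intact.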
type LeveledCompactor struct { metrics *CompactorMetrics - logger log.Logger + logger *slog.Logger ranges []int64 chunkPool chunkenc.Pool ctx context.Context @@ -167,7 +167,7 @@ type LeveledCompactorOptions struct { EnableOverlappingCompaction bool } -func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, maxBlockChunkSegmentSize int64, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { +func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, maxBlockChunkSegmentSize int64, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { return NewLeveledCompactorWithOptions(ctx, r, l, ranges, pool, LeveledCompactorOptions{ MaxBlockChunkSegmentSize: maxBlockChunkSegmentSize, MergeFunc: mergeFunc, @@ -175,14 +175,14 @@ func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Register }) } -func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { +func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { return NewLeveledCompactorWithOptions(ctx, r, l, ranges, pool, LeveledCompactorOptions{ MergeFunc: mergeFunc, EnableOverlappingCompaction: true, }) } -func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, opts LeveledCompactorOptions) (*LeveledCompactor, error) { +func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts LeveledCompactorOptions) (*LeveledCompactor, error) { if len(ranges) == 0 { return nil, fmt.Errorf("at least one range must be provided") } @@ -190,7 +190,7 @@ func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer pool = chunkenc.NewPool() } if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } mergeFunc := opts.MergeFunc if mergeFunc == nil { @@ -500,15 +500,15 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, b.meta.Compaction.Deletable = true n, err := writeMetaFile(c.logger, b.dir, &b.meta) if err != nil { - level.Error(c.logger).Log( - "msg", "Failed to write 'Deletable' to meta file after compaction", + c.logger.Error( + "Failed to write 'Deletable' to meta file after compaction", "ulid", b.meta.ULID, ) } b.numBytesMeta = n } - level.Info(c.logger).Log( - "msg", "compact blocks resulted in empty block", + c.logger.Info( + "compact blocks resulted in empty block", "count", len(blocks), "sources", fmt.Sprintf("%v", uids), "duration", time.Since(start), @@ -516,8 +516,8 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, return nil, nil } - level.Info(c.logger).Log( - "msg", "compact blocks", + c.logger.Info( + "compact blocks", "count", len(blocks), "mint", meta.MinTime, "maxt", meta.MaxTime, @@ -568,8 +568,8 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b } if meta.Stats.NumSamples == 0 { - level.Info(c.logger).Log( - "msg", "write block resulted in empty block", + c.logger.Info( + "write block resulted in empty block", "mint", meta.MinTime, "maxt", meta.MaxTime, "duration", 
time.Since(start), @@ -577,8 +577,8 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b return nil, nil } - level.Info(c.logger).Log( - "msg", "write block", + c.logger.Info( + "write block", "mint", meta.MinTime, "maxt", meta.MaxTime, "ulid", meta.ULID, @@ -617,7 +617,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator Bl // RemoveAll returns no error when tmp doesn't exist so it is safe to always run it. if err := os.RemoveAll(tmp); err != nil { - level.Error(c.logger).Log("msg", "removed tmp folder after failed compaction", "err", err.Error()) + c.logger.Error("removed tmp folder after failed compaction", "err", err.Error()) } c.metrics.Ran.Inc() c.metrics.Duration.Observe(time.Since(t).Seconds()) @@ -722,7 +722,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator Bl } type BlockPopulator interface { - PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, postingsFunc IndexReaderPostingsFunc) error + PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger *slog.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, postingsFunc IndexReaderPostingsFunc) error } // IndexReaderPostingsFunc is a function to get a sorted posting iterator from a given index reader. @@ -743,7 +743,7 @@ type DefaultBlockPopulator struct{} // PopulateBlock fills the index and chunk writers with new data gathered as the union // of the provided blocks. It returns meta information for the new block. // It expects sorted blocks input by mint. 
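Because the constructors above now take a *slog.Logger, embedding code that used to hand NewLeveledCompactor a go-kit logger switches to any slog logger (promslog.NewNopLogger(), promslog.New(&promslog.Config{}), or nil for the built-in no-op fallback). A minimal sketch of the call under the new signature; the registry and the single 2h range are example values, not something this patch prescribes:

package main

import (
	"context"
	"log/slog"
	"os"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	// Any *slog.Logger works now; a promslog logger or a plain slog handler
	// are both fine, and nil falls back to a no-op logger.
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

	compactor, err := tsdb.NewLeveledCompactor(
		context.Background(),
		prometheus.NewRegistry(),
		logger,
		[]int64{2 * 60 * 60 * 1000}, // One 2h range, in milliseconds (example value).
		nil,                         // Default chunk pool.
		nil,                         // Default vertical merge function.
	)
	if err != nil {
		panic(err)
	}
	_ = compactor
}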
-func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, postingsFunc IndexReaderPostingsFunc) (err error) { +func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger *slog.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, postingsFunc IndexReaderPostingsFunc) (err error) { if len(blocks) == 0 { return errors.New("cannot populate block from no readers") } @@ -776,7 +776,7 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa if i > 0 && b.Meta().MinTime < globalMaxt { metrics.OverlappingBlocks.Inc() overlapping = true - level.Info(logger).Log("msg", "Found overlapping blocks during compaction", "ulid", meta.ULID) + logger.Info("Found overlapping blocks during compaction", "ulid", meta.ULID) } if b.Meta().MaxTime > globalMaxt { globalMaxt = b.Meta().MaxTime diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index e7998abf7d6..5123d6e6245 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -28,9 +28,9 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/oklog/ulid" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/histogram" @@ -434,7 +434,7 @@ func TestRangeWithFailedCompactionWontGetSelected(t *testing.T) { } func TestCompactionFailWillCleanUpTempDir(t *testing.T) { - compactor, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{ + compactor, err := NewLeveledCompactor(context.Background(), nil, promslog.NewNopLogger(), []int64{ 20, 60, 240, @@ -1045,8 +1045,7 @@ func TestCompaction_populateBlock(t *testing.T) { } err = blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, iw, nopChunkWriter{}, irPostingsFunc) if tc.expErr != nil { - require.Error(t, err) - require.Equal(t, tc.expErr.Error(), err.Error()) + require.EqualError(t, err, tc.expErr.Error()) return } require.NoError(t, err) @@ -1163,7 +1162,7 @@ func BenchmarkCompaction(b *testing.B) { blockDirs = append(blockDirs, block.Dir()) } - c, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{0}, nil, nil) + c, err := NewLeveledCompactor(context.Background(), nil, promslog.NewNopLogger(), []int64{0}, nil, nil) require.NoError(b, err) b.ResetTimer() @@ -1319,7 +1318,7 @@ func TestCancelCompactions(t *testing.T) { // Measure the compaction time without interrupting it. var timeCompactionUninterrupted time.Duration { - db, err := open(tmpdir, log.NewNopLogger(), nil, DefaultOptions(), []int64{1, 2000}, nil) + db, err := open(tmpdir, promslog.NewNopLogger(), nil, DefaultOptions(), []int64{1, 2000}, nil) require.NoError(t, err) require.Len(t, db.Blocks(), 3, "initial block count mismatch") require.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran), "initial compaction counter mismatch") @@ -1338,7 +1337,7 @@ func TestCancelCompactions(t *testing.T) { } // Measure the compaction time when closing the db in the middle of compaction. 
{ - db, err := open(tmpdirCopy, log.NewNopLogger(), nil, DefaultOptions(), []int64{1, 2000}, nil) + db, err := open(tmpdirCopy, promslog.NewNopLogger(), nil, DefaultOptions(), []int64{1, 2000}, nil) require.NoError(t, err) require.Len(t, db.Blocks(), 3, "initial block count mismatch") require.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran), "initial compaction counter mismatch") @@ -1359,7 +1358,7 @@ func TestCancelCompactions(t *testing.T) { // This checks that the `context.Canceled` error is properly checked at all levels: // - tsdb_errors.NewMulti() should have the Is() method implemented for correct checks. // - callers should check with errors.Is() instead of ==. - readOnlyDB, err := OpenDBReadOnly(tmpdirCopy, "", log.NewNopLogger()) + readOnlyDB, err := OpenDBReadOnly(tmpdirCopy, "", promslog.NewNopLogger()) require.NoError(t, err) blocks, err := readOnlyDB.Blocks() require.NoError(t, err) @@ -1371,7 +1370,7 @@ func TestCancelCompactions(t *testing.T) { } // TestDeleteCompactionBlockAfterFailedReload ensures that a failed reloadBlocks immediately after a compaction -// deletes the resulting block to avoid creatings blocks with the same time range. +// deletes the resulting block to avoid creating blocks with the same time range. func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) { tests := map[string]func(*DB) int{ "Test Head Compaction": func(db *DB) int { @@ -1918,7 +1917,7 @@ func TestCompactEmptyResultBlockWithTombstone(t *testing.T) { err = block.Delete(ctx, 0, 10, labels.MustNewMatcher(labels.MatchEqual, defaultLabelName, "0")) require.NoError(t, err) - c, err := NewLeveledCompactor(ctx, nil, log.NewNopLogger(), []int64{0}, nil, nil) + c, err := NewLeveledCompactor(ctx, nil, promslog.NewNopLogger(), []int64{0}, nil, nil) require.NoError(t, err) ulids, err := c.Compact(tmpdir, []string{blockDir}, []*Block{block}) @@ -2114,7 +2113,7 @@ func TestDelayedCompactionDoesNotBlockUnrelatedOps(t *testing.T) { t.Parallel() tmpdir := t.TempDir() - // Some blocks that need compation are present. + // Some blocks that need compaction are present. createBlock(t, tmpdir, genSeries(1, 1, 0, 100)) createBlock(t, tmpdir, genSeries(1, 1, 100, 200)) createBlock(t, tmpdir, genSeries(1, 1, 200, 300)) @@ -2122,7 +2121,7 @@ func TestDelayedCompactionDoesNotBlockUnrelatedOps(t *testing.T) { options := DefaultOptions() // This will make the test timeout if compaction really waits for it. options.CompactionDelay = time.Hour - db, err := open(tmpdir, log.NewNopLogger(), nil, options, []int64{10, 200}, nil) + db, err := open(tmpdir, promslog.NewNopLogger(), nil, options, []int64{10, 200}, nil) require.NoError(t, err) defer func() { require.NoError(t, db.Close()) diff --git a/tsdb/db.go b/tsdb/db.go index a339414c7b9..bb9fe6ad7e3 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -20,6 +20,7 @@ import ( "fmt" "io" "io/fs" + "log/slog" "math" "math/rand" "os" @@ -29,10 +30,9 @@ import ( "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "go.uber.org/atomic" "golang.org/x/sync/errgroup" @@ -52,6 +52,9 @@ const ( // DefaultBlockDuration in milliseconds. DefaultBlockDuration = int64(2 * time.Hour / time.Millisecond) + // DefaultCompactionDelayMaxPercent in percentage. + DefaultCompactionDelayMaxPercent = 10 + // Block dir suffixes to make deletion and creation operations atomic. 
// We decided to do suffixes instead of creating meta.json as last (or delete as first) one, // because in error case you still can recover meta.json from the block content within local TSDB dir. @@ -86,6 +89,7 @@ func DefaultOptions() *Options { EnableOverlappingCompaction: true, EnableSharding: false, EnableDelayedCompaction: false, + CompactionDelayMaxPercent: DefaultCompactionDelayMaxPercent, CompactionDelay: time.Duration(0), } } @@ -173,6 +177,12 @@ type Options struct { // EnableNativeHistograms enables the ingestion of native histograms. EnableNativeHistograms bool + // EnableOOONativeHistograms enables the ingestion of OOO native histograms. + // It will only take effect if EnableNativeHistograms is set to true and the + // OutOfOrderTimeWindow is > 0. This flag will be removed after testing of + // OOO Native Histogram ingestion is complete. + EnableOOONativeHistograms bool + // OutOfOrderTimeWindow specifies how much out of order is allowed, if any. // This can change during run-time, so this value from here should only be used // while initialising. @@ -198,6 +208,8 @@ type Options struct { // CompactionDelay delays the start time of auto compactions. // It can be increased by up to one minute if the DB does not commit too often. CompactionDelay time.Duration + // CompactionDelayMaxPercent is the upper limit for CompactionDelay, specified as a percentage of the head chunk range. + CompactionDelayMaxPercent int // NewCompactorFunc is a function that returns a TSDB compactor. NewCompactorFunc NewCompactorFunc @@ -209,7 +221,7 @@ type Options struct { BlockChunkQuerierFunc BlockChunkQuerierFunc } -type NewCompactorFunc func(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error) +type NewCompactorFunc func(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error) type BlocksToDeleteFunc func(blocks []*Block) map[ulid.ULID]struct{} @@ -223,7 +235,7 @@ type DB struct { dir string locker *tsdbutil.DirLocker - logger log.Logger + logger *slog.Logger metrics *dbMetrics opts *Options chunkPool chunkenc.Pool @@ -414,7 +426,7 @@ var ErrClosed = errors.New("db already closed") // Current implementation doesn't support concurrency so // all API calls should happen in the same go routine. type DBReadOnly struct { - logger log.Logger + logger *slog.Logger dir string sandboxDir string closers []io.Closer @@ -422,7 +434,7 @@ type DBReadOnly struct { } // OpenDBReadOnly opens DB in the given directory for read only operations. 
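The new Options fields above are plain struct fields, so wiring them up is a matter of setting them before Open; note the documented caveat that EnableOOONativeHistograms only takes effect when native histograms are enabled and the out-of-order window is greater than zero. A hedged sketch with example values; the data directory and the 30-minute window are placeholders:

package main

import (
	"time"

	"github.com/prometheus/common/promslog"

	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	opts := tsdb.DefaultOptions()

	// Out-of-order ingestion window in milliseconds (example value).
	opts.OutOfOrderTimeWindow = 30 * time.Minute.Milliseconds()

	// OOO native histograms only apply when native histograms are enabled
	// and the window above is > 0.
	opts.EnableNativeHistograms = true
	opts.EnableOOONativeHistograms = true

	// Cap the random auto-compaction delay at 10% of the head chunk range;
	// 10 is also DefaultCompactionDelayMaxPercent.
	opts.EnableDelayedCompaction = true
	opts.CompactionDelayMaxPercent = tsdb.DefaultCompactionDelayMaxPercent

	db, err := tsdb.Open("./data", promslog.NewNopLogger(), nil, opts, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()
}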
-func OpenDBReadOnly(dir, sandboxDirRoot string, l log.Logger) (*DBReadOnly, error) { +func OpenDBReadOnly(dir, sandboxDirRoot string, l *slog.Logger) (*DBReadOnly, error) { if _, err := os.Stat(dir); err != nil { return nil, fmt.Errorf("opening the db dir: %w", err) } @@ -436,7 +448,7 @@ func OpenDBReadOnly(dir, sandboxDirRoot string, l log.Logger) (*DBReadOnly, erro } if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } return &DBReadOnly{ @@ -635,7 +647,7 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) { if len(corrupted) > 0 { for _, b := range loadable { if err := b.Close(); err != nil { - level.Warn(db.logger).Log("msg", "Closing block failed", "err", err, "block", b) + db.logger.Warn("Closing block failed", "err", err, "block", b) } } errs := tsdb_errors.NewMulti() @@ -667,7 +679,7 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) { blockMetas = append(blockMetas, b.Meta()) } if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 { - level.Warn(db.logger).Log("msg", "Overlapping blocks found during opening", "detail", overlaps.String()) + db.logger.Warn("Overlapping blocks found during opening", "detail", overlaps.String()) } // Close all previously open readers and add the new ones to the cache. @@ -745,7 +757,7 @@ func (db *DBReadOnly) Close() error { defer func() { // Delete the temporary sandbox directory that was created when opening the DB. if err := os.RemoveAll(db.sandboxDir); err != nil { - level.Error(db.logger).Log("msg", "delete sandbox dir", "err", err) + db.logger.Error("delete sandbox dir", "err", err) } }() select { @@ -759,7 +771,7 @@ func (db *DBReadOnly) Close() error { } // Open returns a new DB in the given directory. If options are empty, DefaultOptions will be used. -func Open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, stats *DBStats) (db *DB, err error) { +func Open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, stats *DBStats) (db *DB, err error) { var rngs []int64 opts, rngs = validateOpts(opts, nil) @@ -809,12 +821,12 @@ func validateOpts(opts *Options, rngs []int64) (*Options, []int64) { // open returns a new DB in the given directory. // It initializes the lockfile, WAL, compactor, and Head (by replaying the WAL), and runs the database. // It is not safe to open more than one DB in the same directory. 
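OpenDBReadOnly keeps its shape; only the logger parameter changes to *slog.Logger, and nil still falls back to a no-op logger as the body above shows. A sketch of read-only inspection of an existing TSDB directory; the path is a placeholder, and the empty sandbox root simply mirrors what the tests in this patch pass:

package main

import (
	"fmt"

	"github.com/prometheus/common/promslog"

	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	// Second argument is the sandbox directory root; the tests in this
	// patch pass "" and let the DB pick its default location.
	db, err := tsdb.OpenDBReadOnly("./data", "", promslog.New(&promslog.Config{}))
	if err != nil {
		panic(err)
	}
	defer db.Close()

	blocks, err := db.Blocks()
	if err != nil {
		panic(err)
	}
	for _, b := range blocks {
		meta := b.Meta()
		fmt.Println(meta.ULID, meta.MinTime, meta.MaxTime)
	}
}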
-func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs []int64, stats *DBStats) (_ *DB, returnedErr error) { +func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rngs []int64, stats *DBStats) (_ *DB, returnedErr error) { if err := os.MkdirAll(dir, 0o777); err != nil { return nil, err } if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } if stats == nil { stats = NewDBStats() @@ -948,6 +960,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs headOpts.MaxExemplars.Store(opts.MaxExemplars) headOpts.EnableMemorySnapshotOnShutdown = opts.EnableMemorySnapshotOnShutdown headOpts.EnableNativeHistograms.Store(opts.EnableNativeHistograms) + headOpts.EnableOOONativeHistograms.Store(opts.EnableOOONativeHistograms) headOpts.OutOfOrderTimeWindow.Store(opts.OutOfOrderTimeWindow) headOpts.OutOfOrderCapMax.Store(opts.OutOfOrderCapMax) headOpts.EnableSharding = opts.EnableSharding @@ -991,17 +1004,17 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs db.head.metrics.walCorruptionsTotal.Inc() var e *errLoadWbl if errors.As(initErr, &e) { - level.Warn(db.logger).Log("msg", "Encountered WBL read error, attempting repair", "err", initErr) + db.logger.Warn("Encountered WBL read error, attempting repair", "err", initErr) if err := wbl.Repair(e.err); err != nil { return nil, fmt.Errorf("repair corrupted WBL: %w", err) } - level.Info(db.logger).Log("msg", "Successfully repaired WBL") + db.logger.Info("Successfully repaired WBL") } else { - level.Warn(db.logger).Log("msg", "Encountered WAL read error, attempting repair", "err", initErr) + db.logger.Warn("Encountered WAL read error, attempting repair", "err", initErr) if err := wal.Repair(initErr); err != nil { return nil, fmt.Errorf("repair corrupted WAL: %w", err) } - level.Info(db.logger).Log("msg", "Successfully repaired WAL") + db.logger.Info("Successfully repaired WAL") } } @@ -1019,7 +1032,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs return db, nil } -func removeBestEffortTmpDirs(l log.Logger, dir string) error { +func removeBestEffortTmpDirs(l *slog.Logger, dir string) error { files, err := os.ReadDir(dir) if os.IsNotExist(err) { return nil @@ -1030,10 +1043,10 @@ func removeBestEffortTmpDirs(l log.Logger, dir string) error { for _, f := range files { if isTmpDir(f) { if err := os.RemoveAll(filepath.Join(dir, f.Name())); err != nil { - level.Error(l).Log("msg", "failed to delete tmp block dir", "dir", filepath.Join(dir, f.Name()), "err", err) + l.Error("failed to delete tmp block dir", "dir", filepath.Join(dir, f.Name()), "err", err) continue } - level.Info(l).Log("msg", "Found and deleted tmp block dir", "dir", filepath.Join(dir, f.Name())) + l.Info("Found and deleted tmp block dir", "dir", filepath.Join(dir, f.Name())) } } return nil @@ -1071,7 +1084,7 @@ func (db *DB) run(ctx context.Context) { case <-time.After(1 * time.Minute): db.cmtx.Lock() if err := db.reloadBlocks(); err != nil { - level.Error(db.logger).Log("msg", "reloadBlocks", "err", err) + db.logger.Error("reloadBlocks", "err", err) } db.cmtx.Unlock() @@ -1087,7 +1100,7 @@ func (db *DB) run(ctx context.Context) { db.autoCompactMtx.Lock() if db.autoCompact { if err := db.Compact(ctx); err != nil { - level.Error(db.logger).Log("msg", "compaction failed", "err", err) + db.logger.Error("compaction failed", "err", err) backoff = exponential(backoff, 1*time.Second, 1*time.Minute) } else { backoff = 0 @@ -1172,6 +1185,16 @@ 
func (db *DB) DisableNativeHistograms() { db.head.DisableNativeHistograms() } +// EnableOOONativeHistograms enables the ingestion of out-of-order native histograms. +func (db *DB) EnableOOONativeHistograms() { + db.head.EnableOOONativeHistograms() +} + +// DisableOOONativeHistograms disables the ingestion of out-of-order native histograms. +func (db *DB) DisableOOONativeHistograms() { + db.head.DisableOOONativeHistograms() +} + // dbAppender wraps the DB's head appender and triggers compactions on commit // if necessary. type dbAppender struct { @@ -1291,8 +1314,8 @@ func (db *DB) Compact(ctx context.Context) (returnErr error) { compactionDuration := time.Since(start) if compactionDuration.Milliseconds() > db.head.chunkRange.Load() { - level.Warn(db.logger).Log( - "msg", "Head compaction took longer than the block time range, compactions are falling behind and won't be able to catch up", + db.logger.Warn( + "Head compaction took longer than the block time range, compactions are falling behind and won't be able to catch up", "duration", compactionDuration.String(), "block_range", db.head.chunkRange.Load(), ) @@ -1416,15 +1439,15 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID } if len(ulids) == 0 { - level.Info(db.logger).Log( - "msg", "compact ooo head resulted in no blocks", + db.logger.Info( + "compact ooo head resulted in no blocks", "duration", time.Since(start), ) return nil, nil } - level.Info(db.logger).Log( - "msg", "out-of-order compaction completed", + db.logger.Info( + "out-of-order compaction completed", "duration", time.Since(start), "ulids", fmt.Sprintf("%v", ulids), ) @@ -1466,7 +1489,7 @@ func (db *DB) compactBlocks() (err error) { // long enough that we end up with a HEAD block that needs to be written. // Check if that's the case and stop compactions early. if db.head.compactable() && !db.waitingForCompactionDelay() { - level.Warn(db.logger).Log("msg", "aborting block compactions to persit the head block") + db.logger.Warn("aborting block compactions to persit the head block") return nil } @@ -1562,7 +1585,7 @@ func (db *DB) reloadBlocks() (err error) { for _, b := range block.Meta().Compaction.Parents { if _, ok := corrupted[b.ULID]; ok { delete(corrupted, b.ULID) - level.Warn(db.logger).Log("msg", "Found corrupted block, but replaced by compacted one so it's safe to delete. This should not happen with atomic deletes.", "block", b.ULID) + db.logger.Warn("Found corrupted block, but replaced by compacted one so it's safe to delete. 
This should not happen with atomic deletes.", "block", b.ULID) } deletable[b.ULID] = nil } @@ -1624,7 +1647,7 @@ func (db *DB) reloadBlocks() (err error) { blockMetas = append(blockMetas, b.Meta()) } if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 { - level.Warn(db.logger).Log("msg", "Overlapping blocks found during reloadBlocks", "detail", overlaps.String()) + db.logger.Warn("Overlapping blocks found during reloadBlocks", "detail", overlaps.String()) } } @@ -1640,7 +1663,7 @@ func (db *DB) reloadBlocks() (err error) { return nil } -func openBlocks(l log.Logger, dir string, loaded []*Block, chunkPool chunkenc.Pool) (blocks []*Block, corrupted map[ulid.ULID]error, err error) { +func openBlocks(l *slog.Logger, dir string, loaded []*Block, chunkPool chunkenc.Pool) (blocks []*Block, corrupted map[ulid.ULID]error, err error) { bDirs, err := blockDirs(dir) if err != nil { return nil, nil, fmt.Errorf("find blocks: %w", err) @@ -1650,7 +1673,7 @@ func openBlocks(l log.Logger, dir string, loaded []*Block, chunkPool chunkenc.Po for _, bDir := range bDirs { meta, _, err := readMetaFile(bDir) if err != nil { - level.Error(l).Log("msg", "Failed to read meta.json for a block during reloadBlocks. Skipping", "dir", bDir, "err", err) + l.Error("Failed to read meta.json for a block during reloadBlocks. Skipping", "dir", bDir, "err", err) continue } @@ -1767,7 +1790,7 @@ func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error { for ulid, block := range blocks { if block != nil { if err := block.Close(); err != nil { - level.Warn(db.logger).Log("msg", "Closing block failed", "err", err, "block", ulid) + db.logger.Warn("Closing block failed", "err", err, "block", ulid) } } @@ -1788,7 +1811,7 @@ func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error { if err := os.RemoveAll(tmpToDelete); err != nil { return fmt.Errorf("delete obsolete block %s: %w", ulid, err) } - level.Info(db.logger).Log("msg", "Deleting obsolete block", "block", ulid) + db.logger.Info("Deleting obsolete block", "block", ulid) } return nil @@ -1956,7 +1979,7 @@ func (db *DB) DisableCompactions() { defer db.autoCompactMtx.Unlock() db.autoCompact = false - level.Info(db.logger).Log("msg", "Compactions disabled") + db.logger.Info("Compactions disabled") } // EnableCompactions enables auto compactions. @@ -1965,12 +1988,11 @@ func (db *DB) EnableCompactions() { defer db.autoCompactMtx.Unlock() db.autoCompact = true - level.Info(db.logger).Log("msg", "Compactions enabled") + db.logger.Info("Compactions enabled") } func (db *DB) generateCompactionDelay() time.Duration { - // Up to 10% of the head's chunkRange. - return time.Duration(rand.Int63n(db.head.chunkRange.Load()/10)) * time.Millisecond + return time.Duration(rand.Int63n(db.head.chunkRange.Load()*int64(db.opts.CompactionDelayMaxPercent)/100)) * time.Millisecond } // ForceHeadMMap is intended for use only in tests and benchmarks. 
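The generateCompactionDelay change above turns the previously hard-coded 10% into the configurable CompactionDelayMaxPercent: the delay is drawn uniformly from [0, chunkRange * CompactionDelayMaxPercent / 100) milliseconds. A standalone illustration of the resulting ranges; the chunk range and percentages are example values:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// delayUpperBound mirrors the formula in generateCompactionDelay:
// rand.Int63n(chunkRange * maxPercent / 100) milliseconds.
func delayUpperBound(chunkRangeMs int64, maxPercent int) time.Duration {
	return time.Duration(chunkRangeMs*int64(maxPercent)/100) * time.Millisecond
}

func main() {
	chunkRange := (2 * time.Hour).Milliseconds() // Default head chunk range.

	for _, pct := range []int{10, 30} {
		upper := delayUpperBound(chunkRange, pct)
		sample := time.Duration(rand.Int63n(chunkRange*int64(pct)/100)) * time.Millisecond
		fmt.Printf("max percent %d%% -> delay in [0s, %s), e.g. %s\n", pct, upper, sample)
	}
}

With the default 2h head chunk range and the default 10%, the delay therefore lands somewhere within the first 12 minutes.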
@@ -1995,7 +2017,7 @@ func (db *DB) Snapshot(dir string, withHead bool) error { defer db.mtx.RUnlock() for _, b := range db.blocks { - level.Info(db.logger).Log("msg", "Snapshotting block", "block", b) + db.logger.Info("Snapshotting block", "block", b) if err := b.Snapshot(dir); err != nil { return fmt.Errorf("error snapshotting block: %s: %w", b.Dir(), err) @@ -2256,7 +2278,7 @@ func (db *DB) CleanTombstones() (err error) { for _, uid := range uids { dir := filepath.Join(db.Dir(), uid.String()) if err := os.RemoveAll(dir); err != nil { - level.Error(db.logger).Log("msg", "failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err) + db.logger.Error("failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err) } } if err != nil { diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 4e3a077f6a4..50f50a3a250 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -15,13 +15,17 @@ package tsdb import ( "bufio" + "bytes" "context" "encoding/binary" "flag" "fmt" "hash/crc32" + "log/slog" "math" "math/rand" + "net/http" + "net/http/httptest" "os" "path" "path/filepath" @@ -32,14 +36,20 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/oklog/ulid" "github.com/prometheus/client_golang/prometheus" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/atomic" "go.uber.org/goleak" + "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/storage/remote" + + "github.com/gogo/protobuf/proto" + "github.com/golang/snappy" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -102,23 +112,9 @@ func query(t testing.TB, q storage.Querier, matchers ...*labels.Matcher) map[str for ss.Next() { series := ss.At() - samples := []chunks.Sample{} it = series.Iterator(it) - for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() { - switch typ { - case chunkenc.ValFloat: - ts, v := it.At() - samples = append(samples, sample{t: ts, f: v}) - case chunkenc.ValHistogram: - ts, h := it.AtHistogram(nil) - samples = append(samples, sample{t: ts, h: h}) - case chunkenc.ValFloatHistogram: - ts, fh := it.AtFloatHistogram(nil) - samples = append(samples, sample{t: ts, fh: fh}) - default: - t.Fatalf("unknown sample type in query %s", typ.String()) - } - } + samples, err := storage.ExpandSamples(it, newSample) + require.NoError(t, err) require.NoError(t, it.Err()) if len(samples) == 0 { @@ -245,8 +241,8 @@ func TestDataAvailableOnlyAfterCommit(t *testing.T) { func TestNoPanicAfterWALCorruption(t *testing.T) { db := openTestDB(t, &Options{WALSegmentSize: 32 * 1024}, nil) - // Append until the first mmaped head chunk. - // This is to ensure that all samples can be read from the mmaped chunks when the WAL is corrupted. + // Append until the first mmapped head chunk. + // This is to ensure that all samples can be read from the mmapped chunks when the WAL is corrupted. var expSamples []chunks.Sample var maxt int64 ctx := context.Background() @@ -265,7 +261,7 @@ func TestNoPanicAfterWALCorruption(t *testing.T) { // Corrupt the WAL after the first sample of the series so that it has at least one sample and // it is not garbage collected. - // The repair deletes all WAL records after the corrupted record and these are read from the mmaped chunk. + // The repair deletes all WAL records after the corrupted record and these are read from the mmapped chunk. 
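The query test helper above replaces its hand-rolled iterator type switch with storage.ExpandSamples, which walks a chunkenc.Iterator and builds one sample per step through the supplied constructor (the tests pass their local newSample). A rough standalone sketch of the same call; passing nil for the constructor is assumed here to fall back to the storage package's own sample type, per that function's documentation:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

func main() {
	// Build a small XOR chunk so there is an iterator to expand.
	c := chunkenc.NewXORChunk()
	app, err := c.Appender()
	if err != nil {
		panic(err)
	}
	for ts := int64(0); ts < 5; ts++ {
		app.Append(ts*1000, float64(ts))
	}

	// ExpandSamples drains the iterator into a []chunks.Sample, handling
	// floats, histograms and float histograms in one place.
	samples, err := storage.ExpandSamples(c.Iterator(nil), nil)
	if err != nil {
		panic(err)
	}
	for _, s := range samples {
		fmt.Println(s.T(), s.F())
	}
}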
{ walFiles, err := os.ReadDir(path.Join(db.Dir(), "wal")) require.NoError(t, err) @@ -1140,7 +1136,7 @@ func testWALReplayRaceOnSamplesLoggedBeforeSeries(t *testing.T, numSamplesBefore require.NoError(t, db.Close()) // Reopen the DB, replaying the WAL. - reopenDB, err := Open(db.Dir(), log.NewLogfmtLogger(os.Stderr), nil, nil, nil) + reopenDB, err := Open(db.Dir(), promslog.New(&promslog.Config{}), nil, nil, nil) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, reopenDB.Close()) @@ -1609,7 +1605,7 @@ func TestSizeRetention(t *testing.T) { // Create a WAL checkpoint, and compare sizes. first, last, err := wlog.Segments(db.Head().wal.Dir()) require.NoError(t, err) - _, err = wlog.Checkpoint(log.NewNopLogger(), db.Head().wal, first, last-1, func(x chunks.HeadSeriesRef) bool { return false }, 0) + _, err = wlog.Checkpoint(promslog.NewNopLogger(), db.Head().wal, first, last-1, func(x chunks.HeadSeriesRef) bool { return false }, 0) require.NoError(t, err) blockSize = int64(prom_testutil.ToFloat64(db.metrics.blocksBytes)) // Use the actual internal metrics. walSize, err = db.Head().wal.Size() @@ -2350,7 +2346,7 @@ func TestCorrectNumTombstones(t *testing.T) { // This ensures that a snapshot that includes the head and creates a block with a custom time range // will not overlap with the first block created by the next compaction. func TestBlockRanges(t *testing.T) { - logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + logger := promslog.New(&promslog.Config{}) ctx := context.Background() dir := t.TempDir() @@ -2435,7 +2431,7 @@ func TestBlockRanges(t *testing.T) { func TestDBReadOnly(t *testing.T) { var ( dbDir string - logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + logger = promslog.New(&promslog.Config{}) expBlocks []*Block expBlock *Block expSeries map[string][]chunks.Sample @@ -2553,7 +2549,7 @@ func TestDBReadOnly(t *testing.T) { // all api methods return an ErrClosed. func TestDBReadOnlyClosing(t *testing.T) { sandboxDir := t.TempDir() - db, err := OpenDBReadOnly(t.TempDir(), sandboxDir, log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))) + db, err := OpenDBReadOnly(t.TempDir(), sandboxDir, promslog.New(&promslog.Config{})) require.NoError(t, err) // The sandboxDir was there. require.DirExists(t, db.sandboxDir) @@ -2570,7 +2566,7 @@ func TestDBReadOnlyClosing(t *testing.T) { func TestDBReadOnly_FlushWAL(t *testing.T) { var ( dbDir string - logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + logger = promslog.New(&promslog.Config{}) err error maxt int ctx = context.Background() @@ -2650,7 +2646,7 @@ func TestDBReadOnly_Querier_NoAlteration(t *testing.T) { spinUpQuerierAndCheck := func(dir, sandboxDir string, chunksCount int) { dBDirHash := dirHash(dir) - // Bootsrap a RO db from the same dir and set up a querier. + // Bootstrap a RO db from the same dir and set up a querier. dbReadOnly, err := OpenDBReadOnly(dir, sandboxDir, nil) require.NoError(t, err) require.Equal(t, chunksCount, countChunks(dir)) @@ -2669,7 +2665,7 @@ func TestDBReadOnly_Querier_NoAlteration(t *testing.T) { require.NoError(t, db.Close()) }() - // Append until the first mmaped head chunk. + // Append until the first mmapped head chunk. 
for i := 0; i < 121; i++ { app := db.Appender(context.Background()) _, err := app.Append(0, labels.FromStrings("foo", "bar"), int64(i), 0) @@ -3115,7 +3111,7 @@ func TestCompactHead(t *testing.T) { WALCompression: wlog.CompressionSnappy, } - db, err := Open(dbDir, log.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil) + db, err := Open(dbDir, promslog.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil) require.NoError(t, err) ctx := context.Background() app := db.Appender(ctx) @@ -3136,7 +3132,7 @@ func TestCompactHead(t *testing.T) { // Delete everything but the new block and // reopen the db to query it to ensure it includes the head data. require.NoError(t, deleteNonBlocks(db.Dir())) - db, err = Open(dbDir, log.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil) + db, err = Open(dbDir, promslog.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil) require.NoError(t, err) require.Len(t, db.Blocks(), 1) require.Equal(t, int64(maxt), db.Head().MinTime()) @@ -3163,7 +3159,7 @@ func TestCompactHead(t *testing.T) { // TestCompactHeadWithDeletion tests https://github.com/prometheus/prometheus/issues/11585. func TestCompactHeadWithDeletion(t *testing.T) { - db, err := Open(t.TempDir(), log.NewNopLogger(), prometheus.NewRegistry(), nil, nil) + db, err := Open(t.TempDir(), promslog.NewNopLogger(), prometheus.NewRegistry(), nil, nil) require.NoError(t, err) ctx := context.Background() @@ -3276,7 +3272,7 @@ func TestOpen_VariousBlockStates(t *testing.T) { // Regression test: Already removed parent can be still in list, which was causing Open errors. m.Compaction.Parents = append(m.Compaction.Parents, BlockDesc{ULID: ulid.MustParse(filepath.Base(compacted))}) m.Compaction.Parents = append(m.Compaction.Parents, BlockDesc{ULID: ulid.MustParse(filepath.Base(compacted))}) - _, err = writeMetaFile(log.NewLogfmtLogger(os.Stderr), dir, m) + _, err = writeMetaFile(promslog.New(&promslog.Config{}), dir, m) require.NoError(t, err) } tmpCheckpointDir := path.Join(tmpDir, "wal/checkpoint.00000001.tmp") @@ -3288,7 +3284,7 @@ func TestOpen_VariousBlockStates(t *testing.T) { opts := DefaultOptions() opts.RetentionDuration = 0 - db, err := Open(tmpDir, log.NewLogfmtLogger(os.Stderr), nil, opts, nil) + db, err := Open(tmpDir, promslog.New(&promslog.Config{}), nil, opts, nil) require.NoError(t, err) loadedBlocks := db.Blocks() @@ -3332,7 +3328,7 @@ func TestOneCheckpointPerCompactCall(t *testing.T) { tmpDir := t.TempDir() ctx := context.Background() - db, err := Open(tmpDir, log.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil) + db, err := Open(tmpDir, promslog.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, db.Close()) @@ -3394,7 +3390,7 @@ func TestOneCheckpointPerCompactCall(t *testing.T) { createBlock(t, db.dir, genSeries(1, 1, newBlockMint, newBlockMaxt)) - db, err = Open(db.dir, log.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil) + db, err = Open(db.dir, promslog.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil) require.NoError(t, err) db.DisableCompactions() @@ -3443,7 +3439,7 @@ func TestNoPanicOnTSDBOpenError(t *testing.T) { tmpdir := t.TempDir() // Taking the lock will cause a TSDB startup error. 
- l, err := tsdbutil.NewDirLocker(tmpdir, "tsdb", log.NewNopLogger(), nil) + l, err := tsdbutil.NewDirLocker(tmpdir, "tsdb", promslog.NewNopLogger(), nil) require.NoError(t, err) require.NoError(t, l.Lock()) @@ -3996,6 +3992,307 @@ func newTestDB(t *testing.T) *DB { } func TestOOOWALWrite(t *testing.T) { + minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() } + + s := labels.NewSymbolTable() + scratchBuilder1 := labels.NewScratchBuilderWithSymbolTable(s, 1) + scratchBuilder1.Add("l", "v1") + s1 := scratchBuilder1.Labels() + scratchBuilder2 := labels.NewScratchBuilderWithSymbolTable(s, 1) + scratchBuilder2.Add("l", "v2") + s2 := scratchBuilder2.Labels() + + scenarios := map[string]struct { + appendSample func(app storage.Appender, l labels.Labels, mins int64) (storage.SeriesRef, error) + expectedOOORecords []interface{} + expectedInORecords []interface{} + }{ + "float": { + appendSample: func(app storage.Appender, l labels.Labels, mins int64) (storage.SeriesRef, error) { + seriesRef, err := app.Append(0, l, minutes(mins), float64(mins)) + require.NoError(t, err) + return seriesRef, nil + }, + expectedOOORecords: []interface{}{ + // The MmapRef in this are not hand calculated, and instead taken from the test run. + // What is important here is the order of records, and that MmapRef increases for each record. + []record.RefMmapMarker{ + {Ref: 1}, + }, + []record.RefSample{ + {Ref: 1, T: minutes(40), V: 40}, + }, + + []record.RefMmapMarker{ + {Ref: 2}, + }, + []record.RefSample{ + {Ref: 2, T: minutes(42), V: 42}, + }, + + []record.RefSample{ + {Ref: 2, T: minutes(45), V: 45}, + {Ref: 1, T: minutes(35), V: 35}, + }, + []record.RefMmapMarker{ // 3rd sample, hence m-mapped. + {Ref: 1, MmapRef: 0x100000000 + 8}, + }, + []record.RefSample{ + {Ref: 1, T: minutes(36), V: 36}, + {Ref: 1, T: minutes(37), V: 37}, + }, + + []record.RefMmapMarker{ // 3rd sample, hence m-mapped. + {Ref: 1, MmapRef: 0x100000000 + 58}, + }, + []record.RefSample{ // Does not contain the in-order sample here. + {Ref: 1, T: minutes(50), V: 50}, + }, + + // Single commit but multiple OOO records. + []record.RefMmapMarker{ + {Ref: 2, MmapRef: 0x100000000 + 107}, + }, + []record.RefSample{ + {Ref: 2, T: minutes(50), V: 50}, + {Ref: 2, T: minutes(51), V: 51}, + }, + []record.RefMmapMarker{ + {Ref: 2, MmapRef: 0x100000000 + 156}, + }, + []record.RefSample{ + {Ref: 2, T: minutes(52), V: 52}, + {Ref: 2, T: minutes(53), V: 53}, + }, + }, + expectedInORecords: []interface{}{ + []record.RefSeries{ + {Ref: 1, Labels: s1}, + {Ref: 2, Labels: s2}, + }, + []record.RefSample{ + {Ref: 1, T: minutes(60), V: 60}, + {Ref: 2, T: minutes(60), V: 60}, + }, + []record.RefSample{ + {Ref: 1, T: minutes(40), V: 40}, + }, + []record.RefSample{ + {Ref: 2, T: minutes(42), V: 42}, + }, + []record.RefSample{ + {Ref: 2, T: minutes(45), V: 45}, + {Ref: 1, T: minutes(35), V: 35}, + {Ref: 1, T: minutes(36), V: 36}, + {Ref: 1, T: minutes(37), V: 37}, + }, + []record.RefSample{ // Contains both in-order and ooo sample. 
+ {Ref: 1, T: minutes(50), V: 50}, + {Ref: 2, T: minutes(65), V: 65}, + }, + []record.RefSample{ + {Ref: 2, T: minutes(50), V: 50}, + {Ref: 2, T: minutes(51), V: 51}, + {Ref: 2, T: minutes(52), V: 52}, + {Ref: 2, T: minutes(53), V: 53}, + }, + }, + }, + "integer histogram": { + appendSample: func(app storage.Appender, l labels.Labels, mins int64) (storage.SeriesRef, error) { + seriesRef, err := app.AppendHistogram(0, l, minutes(mins), tsdbutil.GenerateTestHistogram(int(mins)), nil) + require.NoError(t, err) + return seriesRef, nil + }, + expectedOOORecords: []interface{}{ + // The MmapRef in this are not hand calculated, and instead taken from the test run. + // What is important here is the order of records, and that MmapRef increases for each record. + []record.RefMmapMarker{ + {Ref: 1}, + }, + []record.RefHistogramSample{ + {Ref: 1, T: minutes(40), H: tsdbutil.GenerateTestHistogram(40)}, + }, + + []record.RefMmapMarker{ + {Ref: 2}, + }, + []record.RefHistogramSample{ + {Ref: 2, T: minutes(42), H: tsdbutil.GenerateTestHistogram(42)}, + }, + + []record.RefHistogramSample{ + {Ref: 2, T: minutes(45), H: tsdbutil.GenerateTestHistogram(45)}, + {Ref: 1, T: minutes(35), H: tsdbutil.GenerateTestHistogram(35)}, + }, + []record.RefMmapMarker{ // 3rd sample, hence m-mapped. + {Ref: 1, MmapRef: 0x100000000 + 8}, + }, + []record.RefHistogramSample{ + {Ref: 1, T: minutes(36), H: tsdbutil.GenerateTestHistogram(36)}, + {Ref: 1, T: minutes(37), H: tsdbutil.GenerateTestHistogram(37)}, + }, + + []record.RefMmapMarker{ // 3rd sample, hence m-mapped. + {Ref: 1, MmapRef: 0x100000000 + 89}, + }, + []record.RefHistogramSample{ // Does not contain the in-order sample here. + {Ref: 1, T: minutes(50), H: tsdbutil.GenerateTestHistogram(50)}, + }, + + // Single commit but multiple OOO records. + []record.RefMmapMarker{ + {Ref: 2, MmapRef: 0x100000000 + 172}, + }, + []record.RefHistogramSample{ + {Ref: 2, T: minutes(50), H: tsdbutil.GenerateTestHistogram(50)}, + {Ref: 2, T: minutes(51), H: tsdbutil.GenerateTestHistogram(51)}, + }, + []record.RefMmapMarker{ + {Ref: 2, MmapRef: 0x100000000 + 257}, + }, + []record.RefHistogramSample{ + {Ref: 2, T: minutes(52), H: tsdbutil.GenerateTestHistogram(52)}, + {Ref: 2, T: minutes(53), H: tsdbutil.GenerateTestHistogram(53)}, + }, + }, + expectedInORecords: []interface{}{ + []record.RefSeries{ + {Ref: 1, Labels: s1}, + {Ref: 2, Labels: s2}, + }, + []record.RefHistogramSample{ + {Ref: 1, T: minutes(60), H: tsdbutil.GenerateTestHistogram(60)}, + {Ref: 2, T: minutes(60), H: tsdbutil.GenerateTestHistogram(60)}, + }, + []record.RefHistogramSample{ + {Ref: 1, T: minutes(40), H: tsdbutil.GenerateTestHistogram(40)}, + }, + []record.RefHistogramSample{ + {Ref: 2, T: minutes(42), H: tsdbutil.GenerateTestHistogram(42)}, + }, + []record.RefHistogramSample{ + {Ref: 2, T: minutes(45), H: tsdbutil.GenerateTestHistogram(45)}, + {Ref: 1, T: minutes(35), H: tsdbutil.GenerateTestHistogram(35)}, + {Ref: 1, T: minutes(36), H: tsdbutil.GenerateTestHistogram(36)}, + {Ref: 1, T: minutes(37), H: tsdbutil.GenerateTestHistogram(37)}, + }, + []record.RefHistogramSample{ // Contains both in-order and ooo sample. 
+ {Ref: 1, T: minutes(50), H: tsdbutil.GenerateTestHistogram(50)}, + {Ref: 2, T: minutes(65), H: tsdbutil.GenerateTestHistogram(65)}, + }, + []record.RefHistogramSample{ + {Ref: 2, T: minutes(50), H: tsdbutil.GenerateTestHistogram(50)}, + {Ref: 2, T: minutes(51), H: tsdbutil.GenerateTestHistogram(51)}, + {Ref: 2, T: minutes(52), H: tsdbutil.GenerateTestHistogram(52)}, + {Ref: 2, T: minutes(53), H: tsdbutil.GenerateTestHistogram(53)}, + }, + }, + }, + "float histogram": { + appendSample: func(app storage.Appender, l labels.Labels, mins int64) (storage.SeriesRef, error) { + seriesRef, err := app.AppendHistogram(0, l, minutes(mins), nil, tsdbutil.GenerateTestFloatHistogram(int(mins))) + require.NoError(t, err) + return seriesRef, nil + }, + expectedOOORecords: []interface{}{ + // The MmapRef in this are not hand calculated, and instead taken from the test run. + // What is important here is the order of records, and that MmapRef increases for each record. + []record.RefMmapMarker{ + {Ref: 1}, + }, + []record.RefFloatHistogramSample{ + {Ref: 1, T: minutes(40), FH: tsdbutil.GenerateTestFloatHistogram(40)}, + }, + + []record.RefMmapMarker{ + {Ref: 2}, + }, + []record.RefFloatHistogramSample{ + {Ref: 2, T: minutes(42), FH: tsdbutil.GenerateTestFloatHistogram(42)}, + }, + + []record.RefFloatHistogramSample{ + {Ref: 2, T: minutes(45), FH: tsdbutil.GenerateTestFloatHistogram(45)}, + {Ref: 1, T: minutes(35), FH: tsdbutil.GenerateTestFloatHistogram(35)}, + }, + []record.RefMmapMarker{ // 3rd sample, hence m-mapped. + {Ref: 1, MmapRef: 0x100000000 + 8}, + }, + []record.RefFloatHistogramSample{ + {Ref: 1, T: minutes(36), FH: tsdbutil.GenerateTestFloatHistogram(36)}, + {Ref: 1, T: minutes(37), FH: tsdbutil.GenerateTestFloatHistogram(37)}, + }, + + []record.RefMmapMarker{ // 3rd sample, hence m-mapped. + {Ref: 1, MmapRef: 0x100000000 + 177}, + }, + []record.RefFloatHistogramSample{ // Does not contain the in-order sample here. + {Ref: 1, T: minutes(50), FH: tsdbutil.GenerateTestFloatHistogram(50)}, + }, + + // Single commit but multiple OOO records. + []record.RefMmapMarker{ + {Ref: 2, MmapRef: 0x100000000 + 348}, + }, + []record.RefFloatHistogramSample{ + {Ref: 2, T: minutes(50), FH: tsdbutil.GenerateTestFloatHistogram(50)}, + {Ref: 2, T: minutes(51), FH: tsdbutil.GenerateTestFloatHistogram(51)}, + }, + []record.RefMmapMarker{ + {Ref: 2, MmapRef: 0x100000000 + 521}, + }, + []record.RefFloatHistogramSample{ + {Ref: 2, T: minutes(52), FH: tsdbutil.GenerateTestFloatHistogram(52)}, + {Ref: 2, T: minutes(53), FH: tsdbutil.GenerateTestFloatHistogram(53)}, + }, + }, + expectedInORecords: []interface{}{ + []record.RefSeries{ + {Ref: 1, Labels: s1}, + {Ref: 2, Labels: s2}, + }, + []record.RefFloatHistogramSample{ + {Ref: 1, T: minutes(60), FH: tsdbutil.GenerateTestFloatHistogram(60)}, + {Ref: 2, T: minutes(60), FH: tsdbutil.GenerateTestFloatHistogram(60)}, + }, + []record.RefFloatHistogramSample{ + {Ref: 1, T: minutes(40), FH: tsdbutil.GenerateTestFloatHistogram(40)}, + }, + []record.RefFloatHistogramSample{ + {Ref: 2, T: minutes(42), FH: tsdbutil.GenerateTestFloatHistogram(42)}, + }, + []record.RefFloatHistogramSample{ + {Ref: 2, T: minutes(45), FH: tsdbutil.GenerateTestFloatHistogram(45)}, + {Ref: 1, T: minutes(35), FH: tsdbutil.GenerateTestFloatHistogram(35)}, + {Ref: 1, T: minutes(36), FH: tsdbutil.GenerateTestFloatHistogram(36)}, + {Ref: 1, T: minutes(37), FH: tsdbutil.GenerateTestFloatHistogram(37)}, + }, + []record.RefFloatHistogramSample{ // Contains both in-order and ooo sample. 
+ {Ref: 1, T: minutes(50), FH: tsdbutil.GenerateTestFloatHistogram(50)}, + {Ref: 2, T: minutes(65), FH: tsdbutil.GenerateTestFloatHistogram(65)}, + }, + []record.RefFloatHistogramSample{ + {Ref: 2, T: minutes(50), FH: tsdbutil.GenerateTestFloatHistogram(50)}, + {Ref: 2, T: minutes(51), FH: tsdbutil.GenerateTestFloatHistogram(51)}, + {Ref: 2, T: minutes(52), FH: tsdbutil.GenerateTestFloatHistogram(52)}, + {Ref: 2, T: minutes(53), FH: tsdbutil.GenerateTestFloatHistogram(53)}, + }, + }, + }, + } + for name, scenario := range scenarios { + t.Run(name, func(t *testing.T) { + testOOOWALWrite(t, scenario.appendSample, scenario.expectedOOORecords, scenario.expectedInORecords) + }) + } +} + +func testOOOWALWrite(t *testing.T, + appendSample func(app storage.Appender, l labels.Labels, mins int64) (storage.SeriesRef, error), + expectedOOORecords []interface{}, + expectedInORecords []interface{}, +) { dir := t.TempDir() opts := DefaultOptions() @@ -4004,18 +4301,14 @@ func TestOOOWALWrite(t *testing.T) { db, err := Open(dir, nil, nil, opts, nil) require.NoError(t, err) + db.EnableNativeHistograms() + db.EnableOOONativeHistograms() t.Cleanup(func() { require.NoError(t, db.Close()) }) s1, s2 := labels.FromStrings("l", "v1"), labels.FromStrings("l", "v2") - minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() } - - appendSample := func(app storage.Appender, l labels.Labels, mins int64) { - _, err = app.Append(0, l, minutes(mins), float64(mins)) - require.NoError(t, err) - } // Ingest sample at 1h. app := db.Appender(context.Background()) @@ -4055,92 +4348,6 @@ func TestOOOWALWrite(t *testing.T) { appendSample(app, s2, 53) require.NoError(t, app.Commit()) - // The MmapRef in this are not hand calculated, and instead taken from the test run. - // What is important here is the order of records, and that MmapRef increases for each record. - oooRecords := []interface{}{ - []record.RefMmapMarker{ - {Ref: 1}, - }, - []record.RefSample{ - {Ref: 1, T: minutes(40), V: 40}, - }, - - []record.RefMmapMarker{ - {Ref: 2}, - }, - []record.RefSample{ - {Ref: 2, T: minutes(42), V: 42}, - }, - - []record.RefSample{ - {Ref: 2, T: minutes(45), V: 45}, - {Ref: 1, T: minutes(35), V: 35}, - }, - []record.RefMmapMarker{ // 3rd sample, hence m-mapped. - {Ref: 1, MmapRef: 4294967304}, - }, - []record.RefSample{ - {Ref: 1, T: minutes(36), V: 36}, - {Ref: 1, T: minutes(37), V: 37}, - }, - - []record.RefMmapMarker{ // 3rd sample, hence m-mapped. - {Ref: 1, MmapRef: 4294967354}, - }, - []record.RefSample{ // Does not contain the in-order sample here. - {Ref: 1, T: minutes(50), V: 50}, - }, - - // Single commit but multiple OOO records. - []record.RefMmapMarker{ - {Ref: 2, MmapRef: 4294967403}, - }, - []record.RefSample{ - {Ref: 2, T: minutes(50), V: 50}, - {Ref: 2, T: minutes(51), V: 51}, - }, - []record.RefMmapMarker{ - {Ref: 2, MmapRef: 4294967452}, - }, - []record.RefSample{ - {Ref: 2, T: minutes(52), V: 52}, - {Ref: 2, T: minutes(53), V: 53}, - }, - } - - inOrderRecords := []interface{}{ - []record.RefSeries{ - {Ref: 1, Labels: s1}, - {Ref: 2, Labels: s2}, - }, - []record.RefSample{ - {Ref: 1, T: minutes(60), V: 60}, - {Ref: 2, T: minutes(60), V: 60}, - }, - []record.RefSample{ - {Ref: 1, T: minutes(40), V: 40}, - }, - []record.RefSample{ - {Ref: 2, T: minutes(42), V: 42}, - }, - []record.RefSample{ - {Ref: 2, T: minutes(45), V: 45}, - {Ref: 1, T: minutes(35), V: 35}, - {Ref: 1, T: minutes(36), V: 36}, - {Ref: 1, T: minutes(37), V: 37}, - }, - []record.RefSample{ // Contains both in-order and ooo sample. 
- {Ref: 1, T: minutes(50), V: 50}, - {Ref: 2, T: minutes(65), V: 65}, - }, - []record.RefSample{ - {Ref: 2, T: minutes(50), V: 50}, - {Ref: 2, T: minutes(51), V: 51}, - {Ref: 2, T: minutes(52), V: 52}, - {Ref: 2, T: minutes(53), V: 53}, - }, - } - getRecords := func(walDir string) []interface{} { sr, err := wlog.NewSegmentsReader(walDir) require.NoError(t, err) @@ -4149,10 +4356,8 @@ func TestOOOWALWrite(t *testing.T) { require.NoError(t, sr.Close()) }() - var ( - records []interface{} - dec record.Decoder = record.NewDecoder(labels.NewSymbolTable()) - ) + var records []interface{} + dec := record.NewDecoder(nil) for r.Next() { rec := r.Record() switch typ := dec.Type(rec); typ { @@ -4168,6 +4373,14 @@ func TestOOOWALWrite(t *testing.T) { markers, err := dec.MmapMarkers(rec, nil) require.NoError(t, err) records = append(records, markers) + case record.HistogramSamples: + histogramSamples, err := dec.HistogramSamples(rec, nil) + require.NoError(t, err) + records = append(records, histogramSamples) + case record.FloatHistogramSamples: + floatHistogramSamples, err := dec.FloatHistogramSamples(rec, nil) + require.NoError(t, err) + records = append(records, floatHistogramSamples) default: t.Fatalf("got a WAL record that is not series or samples: %v", typ) } @@ -4178,11 +4391,11 @@ func TestOOOWALWrite(t *testing.T) { // The normal WAL. actRecs := getRecords(path.Join(dir, "wal")) - testutil.RequireEqual(t, inOrderRecords, actRecs) + require.Equal(t, expectedInORecords, actRecs) // The WBL. actRecs = getRecords(path.Join(dir, wlog.WblDirName)) - testutil.RequireEqual(t, oooRecords, actRecs) + require.Equal(t, expectedOOORecords, actRecs) } // Tests https://github.com/prometheus/prometheus/issues/10291#issuecomment-1044373110. @@ -4381,7 +4594,7 @@ func TestMetadataCheckpointingOnlyKeepsLatestEntry(t *testing.T) { keep := func(id chunks.HeadSeriesRef) bool { return id != 3 } - _, err = wlog.Checkpoint(log.NewNopLogger(), w, first, last-1, keep, 0) + _, err = wlog.Checkpoint(promslog.NewNopLogger(), w, first, last-1, keep, 0) require.NoError(t, err) // Confirm there's been a checkpoint. @@ -4495,6 +4708,160 @@ func TestMetadataAssertInMemoryData(t *testing.T) { require.Equal(t, *reopenDB.head.series.getByHash(s4.Hash(), s4).meta, m4) } +// TestMultipleEncodingsCommitOrder mainly serves to demonstrate when happens when committing a batch of samples for the +// same series when there are multiple encodings. Commit() will process all float samples before histogram samples. This +// means that if histograms are appended before floats, the histograms could be marked as OOO when they are committed. +// While possible, this shouldn't happen very often - you need the same series to be ingested as both a float and a +// histogram in a single write request. 
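The comment above is the heart of the test that follows: within a single Commit(), float samples are processed before histogram samples, so a histogram whose timestamp is below the batch's maximum float timestamp gets routed to the out-of-order head even though it was appended first. A compressed, hedged sketch of that effect outside the test harness; the temp directory and timestamps are arbitrary, and it assumes the OOO and native-histogram options introduced elsewhere in this patch:

package main

import (
	"context"
	"os"
	"time"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/tsdb"
	"github.com/prometheus/prometheus/tsdb/tsdbutil"
)

func main() {
	dir, err := os.MkdirTemp("", "tsdb-ooo-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	opts := tsdb.DefaultOptions()
	opts.OutOfOrderTimeWindow = time.Hour.Milliseconds()
	opts.EnableNativeHistograms = true
	opts.EnableOOONativeHistograms = true

	db, err := tsdb.Open(dir, nil, nil, opts, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	app := db.Appender(context.Background())
	lbls := labels.FromStrings("foo", "bar")

	// Appended first, but Commit() writes float samples before histogram
	// samples, so the float at t=200 reaches the head ahead of this one.
	if _, err := app.AppendHistogram(0, lbls, 100, tsdbutil.GenerateTestHistogram(100), nil); err != nil {
		panic(err)
	}
	if _, err := app.Append(0, lbls, 200, 200); err != nil {
		panic(err)
	}

	// By the time the histogram is committed, t=100 is below the series'
	// in-order maximum (200), so it lands in the out-of-order head.
	if err := app.Commit(); err != nil {
		panic(err)
	}
}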
+func TestMultipleEncodingsCommitOrder(t *testing.T) { + opts := DefaultOptions() + opts.OutOfOrderCapMax = 30 + opts.OutOfOrderTimeWindow = 24 * time.Hour.Milliseconds() + + series1 := labels.FromStrings("foo", "bar1") + + db := openTestDB(t, opts, nil) + db.DisableCompactions() + db.EnableNativeHistograms() + db.EnableOOONativeHistograms() + defer func() { + require.NoError(t, db.Close()) + }() + + addSample := func(app storage.Appender, ts int64, valType chunkenc.ValueType) chunks.Sample { + if valType == chunkenc.ValFloat { + _, err := app.Append(0, labels.FromStrings("foo", "bar1"), ts, float64(ts)) + require.NoError(t, err) + return sample{t: ts, f: float64(ts)} + } + if valType == chunkenc.ValHistogram { + h := tsdbutil.GenerateTestHistogram(int(ts)) + _, err := app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, h, nil) + require.NoError(t, err) + return sample{t: ts, h: h} + } + fh := tsdbutil.GenerateTestFloatHistogram(int(ts)) + _, err := app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, nil, fh) + require.NoError(t, err) + return sample{t: ts, fh: fh} + } + + verifySamples := func(minT, maxT int64, expSamples []chunks.Sample, oooCount int) { + requireEqualOOOSamples(t, oooCount, db) + + // Verify samples querier. + querier, err := db.Querier(minT, maxT) + require.NoError(t, err) + defer querier.Close() + + seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1")) + require.Len(t, seriesSet, 1) + gotSamples := seriesSet[series1.String()] + requireEqualSamples(t, series1.String(), expSamples, gotSamples, requireEqualSamplesIgnoreCounterResets) + + // Verify chunks querier. + chunkQuerier, err := db.ChunkQuerier(minT, maxT) + require.NoError(t, err) + defer chunkQuerier.Close() + + chks := queryChunks(t, chunkQuerier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1")) + require.NotNil(t, chks[series1.String()]) + require.Len(t, chks, 1) + var gotChunkSamples []chunks.Sample + for _, chunk := range chks[series1.String()] { + it := chunk.Chunk.Iterator(nil) + smpls, err := storage.ExpandSamples(it, newSample) + require.NoError(t, err) + gotChunkSamples = append(gotChunkSamples, smpls...) + require.NoError(t, it.Err()) + } + requireEqualSamples(t, series1.String(), expSamples, gotChunkSamples, requireEqualSamplesIgnoreCounterResets) + } + + var expSamples []chunks.Sample + + // Append samples with different encoding types and then commit them at once. + app := db.Appender(context.Background()) + + for i := 100; i < 105; i++ { + s := addSample(app, int64(i), chunkenc.ValFloat) + expSamples = append(expSamples, s) + } + // These samples will be marked as OOO as their timestamps are less than the max timestamp for float samples in the + // same batch. + for i := 110; i < 120; i++ { + s := addSample(app, int64(i), chunkenc.ValHistogram) + expSamples = append(expSamples, s) + } + // These samples will be marked as OOO as their timestamps are less than the max timestamp for float samples in the + // same batch. + for i := 120; i < 130; i++ { + s := addSample(app, int64(i), chunkenc.ValFloatHistogram) + expSamples = append(expSamples, s) + } + // These samples will be marked as in-order as their timestamps are greater than the max timestamp for float + // samples in the same batch. 
+ for i := 140; i < 150; i++ { + s := addSample(app, int64(i), chunkenc.ValFloatHistogram) + expSamples = append(expSamples, s) + } + // These samples will be marked as in-order, even though they're appended after the float histograms from ts 140-150 + // because float samples are processed first and these samples are in-order wrt the float samples in the batch. + for i := 130; i < 135; i++ { + s := addSample(app, int64(i), chunkenc.ValFloat) + expSamples = append(expSamples, s) + } + + require.NoError(t, app.Commit()) + + sort.Slice(expSamples, func(i, j int) bool { + return expSamples[i].T() < expSamples[j].T() + }) + + // oooCount = 20 because the histograms from 110 - 120 and float histograms from 120 - 130 are detected as OOO. + verifySamples(100, 150, expSamples, 20) + + // Append and commit some in-order histograms by themselves. + app = db.Appender(context.Background()) + for i := 150; i < 160; i++ { + s := addSample(app, int64(i), chunkenc.ValHistogram) + expSamples = append(expSamples, s) + } + require.NoError(t, app.Commit()) + + // oooCount remains at 20 as no new OOO samples have been added. + verifySamples(100, 160, expSamples, 20) + + // Append and commit samples for all encoding types. This time all samples will be treated as OOO because samples + // with newer timestamps have already been committed. + app = db.Appender(context.Background()) + for i := 50; i < 55; i++ { + s := addSample(app, int64(i), chunkenc.ValFloat) + expSamples = append(expSamples, s) + } + for i := 60; i < 70; i++ { + s := addSample(app, int64(i), chunkenc.ValHistogram) + expSamples = append(expSamples, s) + } + for i := 70; i < 75; i++ { + s := addSample(app, int64(i), chunkenc.ValFloat) + expSamples = append(expSamples, s) + } + for i := 80; i < 90; i++ { + s := addSample(app, int64(i), chunkenc.ValFloatHistogram) + expSamples = append(expSamples, s) + } + require.NoError(t, app.Commit()) + + // Sort samples again because OOO samples have been added. + sort.Slice(expSamples, func(i, j int) bool { + return expSamples[i].T() < expSamples[j].T() + }) + + // oooCount = 50 as we've added 30 more OOO samples. + verifySamples(50, 160, expSamples, 50) +} + // TODO(codesome): test more samples incoming once compaction has started. To verify new samples after the start // // are not included in this compaction. @@ -4516,6 +4883,8 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario, addExtraSample opts := DefaultOptions() opts.OutOfOrderCapMax = 30 opts.OutOfOrderTimeWindow = 300 * time.Minute.Milliseconds() + opts.EnableNativeHistograms = true + opts.EnableOOONativeHistograms = true db, err := Open(dir, nil, nil, opts, nil) require.NoError(t, err) @@ -4721,6 +5090,8 @@ func testOOOCompactionWithNormalCompaction(t *testing.T, scenario sampleTypeScen db, err := Open(dir, nil, nil, opts, nil) require.NoError(t, err) db.DisableCompactions() // We want to manually call it. + db.EnableNativeHistograms() + db.EnableOOONativeHistograms() t.Cleanup(func() { require.NoError(t, db.Close()) }) @@ -4826,10 +5197,14 @@ func testOOOCompactionWithDisabledWriteLog(t *testing.T, scenario sampleTypeScen opts.OutOfOrderCapMax = 30 opts.OutOfOrderTimeWindow = 300 * time.Minute.Milliseconds() opts.WALSegmentSize = -1 // disabled WAL and WBL + opts.EnableNativeHistograms = true + opts.EnableOOONativeHistograms = true db, err := Open(dir, nil, nil, opts, nil) require.NoError(t, err) db.DisableCompactions() // We want to manually call it.
+ db.EnableNativeHistograms() + db.EnableOOONativeHistograms() t.Cleanup(func() { require.NoError(t, db.Close()) }) @@ -4935,6 +5310,8 @@ func testOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T, scenario sa opts.OutOfOrderCapMax = 10 opts.OutOfOrderTimeWindow = 300 * time.Minute.Milliseconds() opts.EnableMemorySnapshotOnShutdown = true + opts.EnableNativeHistograms = true + opts.EnableOOONativeHistograms = true db, err := Open(dir, nil, nil, opts, nil) require.NoError(t, err) @@ -5034,7 +5411,67 @@ func testOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T, scenario sa verifySamples(90, 109) } -func Test_Querier_OOOQuery(t *testing.T) { +func TestQuerierOOOQuery(t *testing.T) { + scenarios := map[string]struct { + appendFunc func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) + sampleFunc func(ts int64) chunks.Sample + }{ + "float": { + appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) { + return app.Append(0, labels.FromStrings("foo", "bar1"), ts, float64(ts)) + }, + sampleFunc: func(ts int64) chunks.Sample { + return sample{t: ts, f: float64(ts)} + }, + }, + "integer histogram": { + appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) { + h := tsdbutil.GenerateTestHistogram(int(ts)) + if counterReset { + h.CounterResetHint = histogram.CounterReset + } + return app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, h, nil) + }, + sampleFunc: func(ts int64) chunks.Sample { + return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(ts))} + }, + }, + "float histogram": { + appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) { + fh := tsdbutil.GenerateTestFloatHistogram(int(ts)) + if counterReset { + fh.CounterResetHint = histogram.CounterReset + } + return app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, nil, fh) + }, + sampleFunc: func(ts int64) chunks.Sample { + return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(ts))} + }, + }, + "integer histogram counter resets": { + // Adding counter reset to all histograms means each histogram will have its own chunk. + appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) { + h := tsdbutil.GenerateTestHistogram(int(ts)) + h.CounterResetHint = histogram.CounterReset // For this scenario, ignore the counterReset argument. 
+ return app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, h, nil) + }, + sampleFunc: func(ts int64) chunks.Sample { + return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(ts))} + }, + }, + } + + for name, scenario := range scenarios { + t.Run(name, func(t *testing.T) { + testQuerierOOOQuery(t, scenario.appendFunc, scenario.sampleFunc) + }) + } +} + +func testQuerierOOOQuery(t *testing.T, + appendFunc func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error), + sampleFunc func(ts int64) chunks.Sample, +) { opts := DefaultOptions() opts.OutOfOrderTimeWindow = 24 * time.Hour.Milliseconds() @@ -5044,16 +5481,16 @@ func Test_Querier_OOOQuery(t *testing.T) { defaultFilterFunc := func(t int64) bool { return true } minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() } - addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample, filter filterFunc) ([]chunks.Sample, int) { + addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample, filter filterFunc, counterReset bool) ([]chunks.Sample, int) { app := db.Appender(context.Background()) totalAppended := 0 for m := fromMins; m <= toMins; m += time.Minute.Milliseconds() { if !filter(m / time.Minute.Milliseconds()) { continue } - _, err := app.Append(0, series1, m, float64(m)) + _, err := appendFunc(app, m, counterReset) if m >= queryMinT && m <= queryMaxT { - expSamples = append(expSamples, sample{t: m, f: float64(m)}) + expSamples = append(expSamples, sampleFunc(m)) } require.NoError(t, err) totalAppended++ @@ -5064,10 +5501,11 @@ func Test_Querier_OOOQuery(t *testing.T) { } type sampleBatch struct { - minT int64 - maxT int64 - filter filterFunc - isOOO bool + minT int64 + maxT int64 + filter filterFunc + counterReset bool + isOOO bool } tests := []struct { @@ -5115,6 +5553,31 @@ func Test_Querier_OOOQuery(t *testing.T) { }, }, }, + { + name: "alternating OOO batches", // In order: 100-200 normal. out of order first path: 0, 2, 4, ... 98 (no counter reset), second pass: 1, 3, 5, ... 99 (with counter reset). + queryMinT: minutes(0), + queryMaxT: minutes(200), + batches: []sampleBatch{ + { + minT: minutes(100), + maxT: minutes(200), + filter: defaultFilterFunc, + }, + { + minT: minutes(0), + maxT: minutes(99), + filter: func(t int64) bool { return t%2 == 0 }, + isOOO: true, + }, + { + minT: minutes(0), + maxT: minutes(99), + filter: func(t int64) bool { return t%2 == 1 }, + counterReset: true, + isOOO: true, + }, + }, + }, { name: "query overlapping inorder and ooo samples returns all ingested samples at the end of the interval", oooCap: 30, @@ -5156,7 +5619,7 @@ func Test_Querier_OOOQuery(t *testing.T) { }, }, { - name: "query inorder contain ooo mmaped samples returns all ingested samples at the beginning of the interval", + name: "query inorder contain ooo mmapped samples returns all ingested samples at the beginning of the interval", oooCap: 5, queryMinT: minutes(0), queryMaxT: minutes(200), @@ -5169,7 +5632,7 @@ func Test_Querier_OOOQuery(t *testing.T) { }, { minT: minutes(101), - maxT: minutes(101 + (5-1)*2), // Append samples to fit in a single mmmaped OOO chunk and fit inside the first in-order mmaped chunk. + maxT: minutes(101 + (5-1)*2), // Append samples to fit in a single mmapped OOO chunk and fit inside the first in-order mmapped chunk. 
filter: func(t int64) bool { return t%2 == 1 }, isOOO: true, }, @@ -5182,7 +5645,7 @@ func Test_Querier_OOOQuery(t *testing.T) { }, }, { - name: "query overlapping inorder and ooo mmaped samples returns all ingested samples at the beginning of the interval", + name: "query overlapping inorder and ooo mmapped samples returns all ingested samples at the beginning of the interval", oooCap: 30, queryMinT: minutes(0), queryMaxT: minutes(200), @@ -5195,7 +5658,7 @@ func Test_Querier_OOOQuery(t *testing.T) { }, { minT: minutes(101), - maxT: minutes(101 + (30-1)*2), // Append samples to fit in a single mmmaped OOO chunk and overlap the first in-order mmaped chunk. + maxT: minutes(101 + (30-1)*2), // Append samples to fit in a single mmapped OOO chunk and overlap the first in-order mmapped chunk. filter: func(t int64) bool { return t%2 == 1 }, isOOO: true, }, @@ -5213,6 +5676,8 @@ func Test_Querier_OOOQuery(t *testing.T) { opts.OutOfOrderCapMax = tc.oooCap db := openTestDB(t, opts, nil) db.DisableCompactions() + db.EnableNativeHistograms() + db.EnableOOONativeHistograms() defer func() { require.NoError(t, db.Close()) }() @@ -5221,7 +5686,7 @@ func Test_Querier_OOOQuery(t *testing.T) { var oooSamples, appendedCount int for _, batch := range tc.batches { - expSamples, appendedCount = addSample(db, batch.minT, batch.maxT, tc.queryMinT, tc.queryMaxT, expSamples, batch.filter) + expSamples, appendedCount = addSample(db, batch.minT, batch.maxT, tc.queryMinT, tc.queryMaxT, expSamples, batch.filter, batch.counterReset) if batch.isOOO { oooSamples += appendedCount } @@ -5236,35 +5701,127 @@ func Test_Querier_OOOQuery(t *testing.T) { defer querier.Close() seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1")) - require.NotNil(t, seriesSet[series1.String()]) + gotSamples := seriesSet[series1.String()] + require.NotNil(t, gotSamples) require.Len(t, seriesSet, 1) - require.Equal(t, expSamples, seriesSet[series1.String()]) + requireEqualSamples(t, series1.String(), expSamples, gotSamples, requireEqualSamplesIgnoreCounterResets) requireEqualOOOSamples(t, oooSamples, db) }) } } -func Test_ChunkQuerier_OOOQuery(t *testing.T) { - opts := DefaultOptions() - opts.OutOfOrderCapMax = 30 - opts.OutOfOrderTimeWindow = 24 * time.Hour.Milliseconds() +func TestChunkQuerierOOOQuery(t *testing.T) { + nBucketHistogram := func(n int64) *histogram.Histogram { + h := &histogram.Histogram{ + Count: uint64(n), + Sum: float64(n), + } + if n == 0 { + h.PositiveSpans = []histogram.Span{} + h.PositiveBuckets = []int64{} + return h + } + h.PositiveSpans = []histogram.Span{{Offset: 0, Length: uint32(n)}} + h.PositiveBuckets = make([]int64, n) + h.PositiveBuckets[0] = 1 + return h + } - series1 := labels.FromStrings("foo", "bar1") + scenarios := map[string]struct { + appendFunc func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) + sampleFunc func(ts int64) chunks.Sample + checkInUseBucket bool + }{ + "float": { + appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) { + return app.Append(0, labels.FromStrings("foo", "bar1"), ts, float64(ts)) + }, + sampleFunc: func(ts int64) chunks.Sample { + return sample{t: ts, f: float64(ts)} + }, + }, + "integer histogram": { + appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) { + h := tsdbutil.GenerateTestHistogram(int(ts)) + if counterReset { + h.CounterResetHint = histogram.CounterReset + } + return app.AppendHistogram(0, labels.FromStrings("foo", 
"bar1"), ts, h, nil) + }, + sampleFunc: func(ts int64) chunks.Sample { + return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(ts))} + }, + }, + "float histogram": { + appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) { + fh := tsdbutil.GenerateTestFloatHistogram(int(ts)) + if counterReset { + fh.CounterResetHint = histogram.CounterReset + } + return app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, nil, fh) + }, + sampleFunc: func(ts int64) chunks.Sample { + return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(ts))} + }, + }, + "integer histogram counter resets": { + // Adding counter reset to all histograms means each histogram will have its own chunk. + appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) { + h := tsdbutil.GenerateTestHistogram(int(ts)) + h.CounterResetHint = histogram.CounterReset // For this scenario, ignore the counterReset argument. + return app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, h, nil) + }, + sampleFunc: func(ts int64) chunks.Sample { + return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(ts))} + }, + }, + "integer histogram with recode": { + // Histograms have increasing number of buckets so their chunks are recoded. + appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) { + n := ts / time.Minute.Milliseconds() + return app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, nBucketHistogram(n), nil) + }, + sampleFunc: func(ts int64) chunks.Sample { + n := ts / time.Minute.Milliseconds() + return sample{t: ts, h: nBucketHistogram(n)} + }, + // Only check in-use buckets for this scenario. + // Recoding adds empty buckets. + checkInUseBucket: true, + }, + } + for name, scenario := range scenarios { + t.Run(name, func(t *testing.T) { + testChunkQuerierOOOQuery(t, scenario.appendFunc, scenario.sampleFunc, scenario.checkInUseBucket) + }) + } +} + +func testChunkQuerierOOOQuery(t *testing.T, + appendFunc func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error), + sampleFunc func(ts int64) chunks.Sample, + checkInUseBuckets bool, +) { + opts := DefaultOptions() + opts.OutOfOrderCapMax = 30 + opts.OutOfOrderTimeWindow = 24 * time.Hour.Milliseconds() + + series1 := labels.FromStrings("foo", "bar1") type filterFunc func(t int64) bool defaultFilterFunc := func(t int64) bool { return true } minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() } - addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample, filter filterFunc) ([]chunks.Sample, int) { + addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample, filter filterFunc, counterReset bool) ([]chunks.Sample, int) { app := db.Appender(context.Background()) totalAppended := 0 for m := fromMins; m <= toMins; m += time.Minute.Milliseconds() { if !filter(m / time.Minute.Milliseconds()) { continue } - _, err := app.Append(0, series1, m, float64(m)) + _, err := appendFunc(app, m, counterReset) if m >= queryMinT && m <= queryMaxT { - expSamples = append(expSamples, sample{t: m, f: float64(m)}) + expSamples = append(expSamples, sampleFunc(m)) } require.NoError(t, err) totalAppended++ @@ -5275,10 +5832,11 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) { } type sampleBatch struct { - minT int64 - maxT int64 - filter filterFunc - isOOO bool + minT int64 + maxT int64 + filter filterFunc + counterReset bool + isOOO bool 
} tests := []struct { @@ -5326,6 +5884,31 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) { }, }, }, + { + name: "alternating OOO batches", // In order: 100-200 normal. out of order first path: 0, 2, 4, ... 98 (no counter reset), second pass: 1, 3, 5, ... 99 (with counter reset). + queryMinT: minutes(0), + queryMaxT: minutes(200), + batches: []sampleBatch{ + { + minT: minutes(100), + maxT: minutes(200), + filter: defaultFilterFunc, + }, + { + minT: minutes(0), + maxT: minutes(99), + filter: func(t int64) bool { return t%2 == 0 }, + isOOO: true, + }, + { + minT: minutes(0), + maxT: minutes(99), + filter: func(t int64) bool { return t%2 == 1 }, + counterReset: true, + isOOO: true, + }, + }, + }, { name: "query overlapping inorder and ooo samples returns all ingested samples at the end of the interval", oooCap: 30, @@ -5367,7 +5950,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) { }, }, { - name: "query inorder contain ooo mmaped samples returns all ingested samples at the beginning of the interval", + name: "query inorder contain ooo mmapped samples returns all ingested samples at the beginning of the interval", oooCap: 5, queryMinT: minutes(0), queryMaxT: minutes(200), @@ -5380,7 +5963,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) { }, { minT: minutes(101), - maxT: minutes(101 + (5-1)*2), // Append samples to fit in a single mmmaped OOO chunk and fit inside the first in-order mmaped chunk. + maxT: minutes(101 + (5-1)*2), // Append samples to fit in a single mmapped OOO chunk and fit inside the first in-order mmapped chunk. filter: func(t int64) bool { return t%2 == 1 }, isOOO: true, }, @@ -5393,7 +5976,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) { }, }, { - name: "query overlapping inorder and ooo mmaped samples returns all ingested samples at the beginning of the interval", + name: "query overlapping inorder and ooo mmapped samples returns all ingested samples at the beginning of the interval", oooCap: 30, queryMinT: minutes(0), queryMaxT: minutes(200), @@ -5406,7 +5989,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) { }, { minT: minutes(101), - maxT: minutes(101 + (30-1)*2), // Append samples to fit in a single mmmaped OOO chunk and overlap the first in-order mmaped chunk. + maxT: minutes(101 + (30-1)*2), // Append samples to fit in a single mmapped OOO chunk and overlap the first in-order mmapped chunk. 
filter: func(t int64) bool { return t%2 == 1 }, isOOO: true, }, @@ -5424,6 +6007,8 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) { opts.OutOfOrderCapMax = tc.oooCap db := openTestDB(t, opts, nil) db.DisableCompactions() + db.EnableNativeHistograms() + db.EnableOOONativeHistograms() defer func() { require.NoError(t, db.Close()) }() @@ -5432,7 +6017,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) { var oooSamples, appendedCount int for _, batch := range tc.batches { - expSamples, appendedCount = addSample(db, batch.minT, batch.maxT, tc.queryMinT, tc.queryMaxT, expSamples, batch.filter) + expSamples, appendedCount = addSample(db, batch.minT, batch.maxT, tc.queryMinT, tc.queryMaxT, expSamples, batch.filter, batch.counterReset) if batch.isOOO { oooSamples += appendedCount } @@ -5453,12 +6038,204 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) { var gotSamples []chunks.Sample for _, chunk := range chks[series1.String()] { it := chunk.Chunk.Iterator(nil) - for it.Next() == chunkenc.ValFloat { - ts, v := it.At() - gotSamples = append(gotSamples, sample{t: ts, f: v}) + smpls, err := storage.ExpandSamples(it, newSample) + require.NoError(t, err) + + // Verify that no sample is outside the chunk's time range. + for i, s := range smpls { + switch i { + case 0: + require.Equal(t, chunk.MinTime, s.T(), "first sample %v not at chunk min time %v", s, chunk.MinTime) + case len(smpls) - 1: + require.Equal(t, chunk.MaxTime, s.T(), "last sample %v not at chunk max time %v", s, chunk.MaxTime) + default: + require.GreaterOrEqual(t, s.T(), chunk.MinTime, "sample %v before chunk min time %v", s, chunk.MinTime) + require.LessOrEqual(t, s.T(), chunk.MaxTime, "sample %v after chunk max time %v", s, chunk.MaxTime) + } + } + + gotSamples = append(gotSamples, smpls...) + require.NoError(t, it.Err()) + } + if checkInUseBuckets { + requireEqualSamples(t, series1.String(), expSamples, gotSamples, requireEqualSamplesIgnoreCounterResets, requireEqualSamplesInUseBucketCompare) + } else { + requireEqualSamples(t, series1.String(), expSamples, gotSamples, requireEqualSamplesIgnoreCounterResets) + } + }) + } +} + +// TestOOONativeHistogramsWithCounterResets verifies the counter reset headers for in-order and out-of-order samples +// upon ingestion. Note that when the counter reset(s) occur in OOO samples, the header is set to UnknownCounterReset +// rather than CounterReset. This is because with OOO native histogram samples, it cannot be definitely +// determined if a counter reset occurred because the samples are not consecutive, and another sample +// could potentially come in that would change the status of the header. In this case, the UnknownCounterReset +// headers would be re-checked at query time and updated as needed. However, this test is checking the counter +// reset headers at the time of storage. 
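As background for the hint values checked below: an UnknownCounterReset header is deliberately conservative and can be resolved against the preceding sample at read time, as the comment above describes. A minimal sketch of that idea (illustrative only; `resolveHint` is a hypothetical helper, not the TSDB's internal code path):

```go
package example

import "github.com/prometheus/prometheus/model/histogram"

// resolveHint settles an UnknownCounterReset hint by comparing the sample
// with its predecessor; explicit hints are kept as-is.
func resolveHint(prev, curr *histogram.FloatHistogram) histogram.CounterResetHint {
	if curr.CounterResetHint != histogram.UnknownCounterReset {
		return curr.CounterResetHint
	}
	if prev != nil && curr.DetectReset(prev) {
		return histogram.CounterReset
	}
	return histogram.NotCounterReset
}
```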
+func TestOOONativeHistogramsWithCounterResets(t *testing.T) { + for name, scenario := range sampleTypeScenarios { + t.Run(name, func(t *testing.T) { + if name == intHistogram || name == floatHistogram { + testOOONativeHistogramsWithCounterResets(t, scenario) + } + }) + } +} + +func testOOONativeHistogramsWithCounterResets(t *testing.T, scenario sampleTypeScenario) { + opts := DefaultOptions() + opts.OutOfOrderCapMax = 30 + opts.OutOfOrderTimeWindow = 24 * time.Hour.Milliseconds() + + type resetFunc func(v int64) bool + defaultResetFunc := func(v int64) bool { return false } + + lbls := labels.FromStrings("foo", "bar1") + minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() } + + type sampleBatch struct { + from int64 + until int64 + shouldReset resetFunc + expCounterResetHints []histogram.CounterResetHint + } + + tests := []struct { + name string + queryMin int64 + queryMax int64 + batches []sampleBatch + expectedSamples []chunks.Sample + }{ + { + name: "Counter reset within in-order samples", + queryMin: minutes(40), + queryMax: minutes(55), + batches: []sampleBatch{ + // In-order samples + { + from: 40, + until: 50, + shouldReset: func(v int64) bool { + return v == 45 + }, + expCounterResetHints: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.CounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset}, + }, + }, + }, + { + name: "Counter reset right at beginning of OOO samples", + queryMin: minutes(40), + queryMax: minutes(55), + batches: []sampleBatch{ + // In-order samples + { + from: 40, + until: 45, + shouldReset: defaultResetFunc, + expCounterResetHints: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset}, + }, + { + from: 50, + until: 55, + shouldReset: defaultResetFunc, + expCounterResetHints: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset}, + }, + // OOO samples + { + from: 45, + until: 50, + shouldReset: func(v int64) bool { + return v == 45 + }, + expCounterResetHints: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset}, + }, + }, + }, + { + name: "Counter resets in both in-order and OOO samples", + queryMin: minutes(40), + queryMax: minutes(55), + batches: []sampleBatch{ + // In-order samples + { + from: 40, + until: 45, + shouldReset: func(v int64) bool { + return v == 44 + }, + expCounterResetHints: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.UnknownCounterReset}, + }, + { + from: 50, + until: 55, + shouldReset: defaultResetFunc, + expCounterResetHints: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset}, + }, + // OOO samples + { + from: 45, + until: 50, + shouldReset: func(v int64) bool { + return v == 49 + }, + expCounterResetHints: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, 
histogram.UnknownCounterReset}, + }, + }, + }, + } + for _, tc := range tests { + t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) { + db := openTestDB(t, opts, nil) + db.DisableCompactions() + db.EnableOOONativeHistograms() + defer func() { + require.NoError(t, db.Close()) + }() + + app := db.Appender(context.Background()) + + expSamples := make(map[string][]chunks.Sample) + + for _, batch := range tc.batches { + j := batch.from + smplIdx := 0 + for i := batch.from; i < batch.until; i++ { + resetCount := batch.shouldReset(i) + if resetCount { + j = 0 + } + _, s, err := scenario.appendFunc(app, lbls, minutes(i), j) + require.NoError(t, err) + if s.Type() == chunkenc.ValHistogram { + s.H().CounterResetHint = batch.expCounterResetHints[smplIdx] + } else if s.Type() == chunkenc.ValFloatHistogram { + s.FH().CounterResetHint = batch.expCounterResetHints[smplIdx] + } + expSamples[lbls.String()] = append(expSamples[lbls.String()], s) + j++ + smplIdx++ } } - require.Equal(t, expSamples, gotSamples) + + require.NoError(t, app.Commit()) + + for k, v := range expSamples { + sort.Slice(v, func(i, j int) bool { + return v[i].T() < v[j].T() + }) + expSamples[k] = v + } + + querier, err := db.Querier(tc.queryMin, tc.queryMax) + require.NoError(t, err) + defer querier.Close() + + seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1")) + require.NotNil(t, seriesSet[lbls.String()]) + require.Len(t, seriesSet, 1) + requireEqualSeries(t, expSamples, seriesSet, false) }) } } @@ -5478,6 +6255,8 @@ func testOOOAppendAndQuery(t *testing.T, scenario sampleTypeScenario) { db := openTestDB(t, opts, nil) db.DisableCompactions() + db.EnableNativeHistograms() + db.EnableOOONativeHistograms() t.Cleanup(func() { require.NoError(t, db.Close()) }) @@ -5555,9 +6334,9 @@ func testOOOAppendAndQuery(t *testing.T, scenario sampleTypeScenario) { addSample(s2, 255, 265, false) verifyOOOMinMaxTimes(250, 265) testQuery(math.MinInt64, math.MaxInt64) - testQuery(minutes(250), minutes(265)) // Test querying ono data time range - testQuery(minutes(290), minutes(300)) // Test querying in-order data time range - testQuery(minutes(250), minutes(300)) // Test querying the entire range + testQuery(minutes(250), minutes(265)) // Test querying ooo data time range. + testQuery(minutes(290), minutes(300)) // Test querying in-order data time range. + testQuery(minutes(250), minutes(300)) // Test querying the entire range. // Out of time window. addSample(s1, 59, 59, true) @@ -5609,6 +6388,8 @@ func testOOODisabled(t *testing.T, scenario sampleTypeScenario) { opts.OutOfOrderTimeWindow = 0 db := openTestDB(t, opts, nil) db.DisableCompactions() + db.EnableNativeHistograms() + db.EnableOOONativeHistograms() t.Cleanup(func() { require.NoError(t, db.Close()) }) @@ -5681,6 +6462,8 @@ func testWBLAndMmapReplay(t *testing.T, scenario sampleTypeScenario) { opts := DefaultOptions() opts.OutOfOrderCapMax = 30 opts.OutOfOrderTimeWindow = 4 * time.Hour.Milliseconds() + opts.EnableNativeHistograms = true + opts.EnableOOONativeHistograms = true db := openTestDB(t, opts, nil) db.DisableCompactions() @@ -5830,7 +6613,7 @@ func testWBLAndMmapReplay(t *testing.T, scenario sampleTypeScenario) { resetMmapToOriginal() // We neet to reset because new duplicate chunks can be written above. // Removing m-map markers in WBL by rewriting it. 
- newWbl, err := wlog.New(log.NewNopLogger(), nil, filepath.Join(t.TempDir(), "new_wbl"), wlog.CompressionNone) + newWbl, err := wlog.New(promslog.NewNopLogger(), nil, filepath.Join(t.TempDir(), "new_wbl"), wlog.CompressionNone) require.NoError(t, err) sr, err := wlog.NewSegmentsReader(originalWblDir) require.NoError(t, err) @@ -5861,6 +6644,380 @@ func testWBLAndMmapReplay(t *testing.T, scenario sampleTypeScenario) { }) } +func TestOOOHistogramCompactionWithCounterResets(t *testing.T) { + for _, floatHistogram := range []bool{false, true} { + dir := t.TempDir() + ctx := context.Background() + + opts := DefaultOptions() + opts.OutOfOrderCapMax = 30 + opts.OutOfOrderTimeWindow = 500 * time.Minute.Milliseconds() + + db, err := Open(dir, nil, nil, opts, nil) + require.NoError(t, err) + db.DisableCompactions() // We want to manually call it. + db.EnableNativeHistograms() + db.EnableOOONativeHistograms() + t.Cleanup(func() { + require.NoError(t, db.Close()) + }) + + series1 := labels.FromStrings("foo", "bar1") + series2 := labels.FromStrings("foo", "bar2") + + var series1ExpSamplesPreCompact, series2ExpSamplesPreCompact, series1ExpSamplesPostCompact, series2ExpSamplesPostCompact []chunks.Sample + + addSample := func(ts int64, l labels.Labels, val int, hint histogram.CounterResetHint) sample { + app := db.Appender(context.Background()) + tsMs := ts * time.Minute.Milliseconds() + if floatHistogram { + h := tsdbutil.GenerateTestFloatHistogram(val) + h.CounterResetHint = hint + _, err = app.AppendHistogram(0, l, tsMs, nil, h) + require.NoError(t, err) + require.NoError(t, app.Commit()) + return sample{t: tsMs, fh: h.Copy()} + } + + h := tsdbutil.GenerateTestHistogram(val) + h.CounterResetHint = hint + _, err = app.AppendHistogram(0, l, tsMs, h, nil) + require.NoError(t, err) + require.NoError(t, app.Commit()) + return sample{t: tsMs, h: h.Copy()} + } + + // Add an in-order sample to each series. + s := addSample(520, series1, 1000000, histogram.UnknownCounterReset) + series1ExpSamplesPreCompact = append(series1ExpSamplesPreCompact, s) + series1ExpSamplesPostCompact = append(series1ExpSamplesPostCompact, s) + + s = addSample(520, series2, 1000000, histogram.UnknownCounterReset) + series2ExpSamplesPreCompact = append(series2ExpSamplesPreCompact, s) + series2ExpSamplesPostCompact = append(series2ExpSamplesPostCompact, s) + + // Verify that the in-memory ooo chunk is empty. + checkEmptyOOOChunk := func(lbls labels.Labels) { + ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls) + require.NoError(t, err) + require.False(t, created) + require.Nil(t, ms.ooo) + } + + checkEmptyOOOChunk(series1) + checkEmptyOOOChunk(series2) + + // Add samples for series1. There are three head chunks that will be created: + // Chunk 1 - Samples between 100 - 440. One explicit counter reset at ts 250. + // Chunk 2 - Samples between 105 - 395. Overlaps with Chunk 1. One detected counter reset at ts 165. + // Chunk 3 - Samples between 480 - 509. All within one block boundary. One detected counter reset at 490. + + // Chunk 1. + // First add 10 samples. + for i := 100; i < 200; i += 10 { + s = addSample(int64(i), series1, 100000+i, histogram.UnknownCounterReset) + // Before compaction, all the samples have UnknownCounterReset even though they've been added to the same + // chunk. This is because they overlap with the samples from chunk two and when merging two chunks on read, + // the header is set as unknown when the next sample is not in the same chunk as the previous one. 
+ series1ExpSamplesPreCompact = append(series1ExpSamplesPreCompact, s) + // After compaction, samples from multiple mmapped chunks will be merged, so there won't be any overlapping + // chunks. Therefore, most samples will have the NotCounterReset header. + // 100 is the first sample in the first chunk in the blocks, so is still set to UnknownCounterReset. + // 120 is a block boundary - after compaction, 120 will be the first sample in a chunk, so is still set to + // UnknownCounterReset. + if i > 100 && i != 120 { + s = copyWithCounterReset(s, histogram.NotCounterReset) + } + series1ExpSamplesPostCompact = append(series1ExpSamplesPostCompact, s) + } + // Explicit counter reset - the counter reset header is set to CounterReset but the value is higher + // than for the previous timestamp. Explicit counter reset headers are actually ignored though, so when reading + // the sample back you actually get unknown/not counter reset. This is because the chainSampleIterator ignores + // existing headers and sets the header as UnknownCounterReset if the next sample is not in the same chunk as + // the previous one, and counter resets always create a new chunk. + // This case has been added to document what's happening, though it might not be the ideal behavior. + s = addSample(250, series1, 100000+250, histogram.CounterReset) + series1ExpSamplesPreCompact = append(series1ExpSamplesPreCompact, copyWithCounterReset(s, histogram.UnknownCounterReset)) + series1ExpSamplesPostCompact = append(series1ExpSamplesPostCompact, copyWithCounterReset(s, histogram.NotCounterReset)) + + // Add 19 more samples to complete a chunk. + for i := 260; i < 450; i += 10 { + s = addSample(int64(i), series1, 100000+i, histogram.UnknownCounterReset) + // The samples with timestamp less than 410 overlap with the samples from chunk 2, so before compaction, + // they're all UnknownCounterReset. Samples greater than or equal to 410 don't overlap with other chunks + // so they're always detected as NotCounterReset pre and post compaction. + if i >= 410 { + s = copyWithCounterReset(s, histogram.NotCounterReset) + } + series1ExpSamplesPreCompact = append(series1ExpSamplesPreCompact, s) + // + // 360 is a block boundary, so after compaction its header is still UnknownCounterReset. + if i != 360 { + s = copyWithCounterReset(s, histogram.NotCounterReset) + } + series1ExpSamplesPostCompact = append(series1ExpSamplesPostCompact, s) + } + + // Chunk 2. + // Add six OOO samples. + for i := 105; i < 165; i += 10 { + s = addSample(int64(i), series1, 100000+i, histogram.UnknownCounterReset) + // Samples overlap with chunk 1 so before compaction all headers are UnknownCounterReset. + series1ExpSamplesPreCompact = append(series1ExpSamplesPreCompact, s) + series1ExpSamplesPostCompact = append(series1ExpSamplesPostCompact, copyWithCounterReset(s, histogram.NotCounterReset)) + } + + // Add sample that will be detected as a counter reset. + s = addSample(165, series1, 100000, histogram.UnknownCounterReset) + // Before compaction, sample has an UnknownCounterReset header due to the chainSampleIterator. + series1ExpSamplesPreCompact = append(series1ExpSamplesPreCompact, s) + // After compaction, the sample's counter reset is properly detected. + series1ExpSamplesPostCompact = append(series1ExpSamplesPostCompact, copyWithCounterReset(s, histogram.CounterReset)) + + // Add 23 more samples to complete a chunk.
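The block-boundary remarks in this test (120, 240, 360, 480 minutes) come down to simple alignment arithmetic: with the default 2h block range, OOO compaction cuts blocks at multiples of 2h. A small illustrative helper showing that arithmetic (hypothetical, not part of the test, which continues below):

```go
package example

// defaultBlockRangeMs is the default 2h TSDB block range in milliseconds.
const defaultBlockRangeMs = int64(2 * 60 * 60 * 1000)

// blockStart returns the start of the block containing ts (in ms); for
// example, a sample at 130 minutes falls into the block starting at 120
// minutes, so 120 becomes the first sample of a chunk after compaction.
func blockStart(ts int64) int64 {
	return ts - ts%defaultBlockRangeMs
}
```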
+ for i := 175; i < 405; i += 10 { + s = addSample(int64(i), series1, 100000+i, histogram.UnknownCounterReset) + // Samples between 205-255 don't overlap with chunk 1 samples, so before compaction those samples keep the + // NotCounterReset header. + if i >= 205 && i < 255 { + s = copyWithCounterReset(s, histogram.NotCounterReset) + } + series1ExpSamplesPreCompact = append(series1ExpSamplesPreCompact, s) + // 245 is the first sample >= the block boundary at 240, so it's still UnknownCounterReset after compaction. + if i != 245 { + s = copyWithCounterReset(s, histogram.NotCounterReset) + } else { + s = copyWithCounterReset(s, histogram.UnknownCounterReset) + } + series1ExpSamplesPostCompact = append(series1ExpSamplesPostCompact, s) + } + + // Chunk 3. + for i := 480; i < 490; i++ { + s = addSample(int64(i), series1, 100000+i, histogram.UnknownCounterReset) + // No overlapping samples in other chunks, so all other samples will already be detected as NotCounterReset + // before compaction. + if i > 480 { + s = copyWithCounterReset(s, histogram.NotCounterReset) + } + series1ExpSamplesPreCompact = append(series1ExpSamplesPreCompact, s) + // 480 is block boundary. + if i == 480 { + s = copyWithCounterReset(s, histogram.UnknownCounterReset) + } + series1ExpSamplesPostCompact = append(series1ExpSamplesPostCompact, s) + } + // Counter reset. + s = addSample(int64(490), series1, 100000, histogram.UnknownCounterReset) + s = copyWithCounterReset(s, histogram.CounterReset) + series1ExpSamplesPreCompact = append(series1ExpSamplesPreCompact, s) + series1ExpSamplesPostCompact = append(series1ExpSamplesPostCompact, s) + // Add some more samples after the counter reset. + for i := 491; i < 510; i++ { + s = addSample(int64(i), series1, 100000+i, histogram.UnknownCounterReset) + s = copyWithCounterReset(s, histogram.NotCounterReset) + series1ExpSamplesPreCompact = append(series1ExpSamplesPreCompact, s) + series1ExpSamplesPostCompact = append(series1ExpSamplesPostCompact, s) + } + + // Add samples for series2 - one chunk with one detected counter reset at 300. + for i := 200; i < 300; i += 10 { + s = addSample(int64(i), series2, 100000+i, histogram.UnknownCounterReset) + if i > 200 { + s = copyWithCounterReset(s, histogram.NotCounterReset) + } + series2ExpSamplesPreCompact = append(series2ExpSamplesPreCompact, s) + if i == 240 { + s = copyWithCounterReset(s, histogram.UnknownCounterReset) + } + series2ExpSamplesPostCompact = append(series2ExpSamplesPostCompact, s) + } + // Counter reset. + s = addSample(int64(300), series2, 100000, histogram.UnknownCounterReset) + s = copyWithCounterReset(s, histogram.CounterReset) + series2ExpSamplesPreCompact = append(series2ExpSamplesPreCompact, s) + series2ExpSamplesPostCompact = append(series2ExpSamplesPostCompact, s) + // Add some more samples after the counter reset. + for i := 310; i < 500; i += 10 { + s := addSample(int64(i), series2, 100000+i, histogram.UnknownCounterReset) + s = copyWithCounterReset(s, histogram.NotCounterReset) + series2ExpSamplesPreCompact = append(series2ExpSamplesPreCompact, s) + // 360 and 480 are block boundaries. + if i == 360 || i == 480 { + s = copyWithCounterReset(s, histogram.UnknownCounterReset) + } + series2ExpSamplesPostCompact = append(series2ExpSamplesPostCompact, s) + } + + // Sort samples (as OOO samples are not added in time-order).
+ sort.Slice(series1ExpSamplesPreCompact, func(i, j int) bool { + return series1ExpSamplesPreCompact[i].T() < series1ExpSamplesPreCompact[j].T() + }) + sort.Slice(series1ExpSamplesPostCompact, func(i, j int) bool { + return series1ExpSamplesPostCompact[i].T() < series1ExpSamplesPostCompact[j].T() + }) + sort.Slice(series2ExpSamplesPreCompact, func(i, j int) bool { + return series2ExpSamplesPreCompact[i].T() < series2ExpSamplesPreCompact[j].T() + }) + sort.Slice(series2ExpSamplesPostCompact, func(i, j int) bool { + return series2ExpSamplesPostCompact[i].T() < series2ExpSamplesPostCompact[j].T() + }) + + verifyDBSamples := func(s1Samples, s2Samples []chunks.Sample) { + expRes := map[string][]chunks.Sample{ + series1.String(): s1Samples, + series2.String(): s2Samples, + } + + q, err := db.Querier(math.MinInt64, math.MaxInt64) + require.NoError(t, err) + actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*")) + requireEqualSeries(t, expRes, actRes, false) + } + + // Verify DB samples before compaction. + verifyDBSamples(series1ExpSamplesPreCompact, series2ExpSamplesPreCompact) + + // Verify that the in-memory ooo chunk is not empty. + checkNonEmptyOOOChunk := func(lbls labels.Labels) { + ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls) + require.NoError(t, err) + require.False(t, created) + require.Positive(t, ms.ooo.oooHeadChunk.chunk.NumSamples()) + } + + checkNonEmptyOOOChunk(series1) + checkNonEmptyOOOChunk(series2) + + // No blocks before compaction. + require.Empty(t, db.Blocks()) + + // There is a 0th WBL file. + require.NoError(t, db.head.wbl.Sync()) // syncing to make sure wbl is flushed in windows + files, err := os.ReadDir(db.head.wbl.Dir()) + require.NoError(t, err) + require.Len(t, files, 1) + require.Equal(t, "00000000", files[0].Name()) + f, err := files[0].Info() + require.NoError(t, err) + require.Greater(t, f.Size(), int64(100)) + + // OOO compaction happens here. + require.NoError(t, db.CompactOOOHead(ctx)) + + // Check that blocks are created after compaction. + require.Len(t, db.Blocks(), 5) + + // Check samples after compaction. + verifyDBSamples(series1ExpSamplesPostCompact, series2ExpSamplesPostCompact) + + // 0th WBL file will be deleted and 1st will be the only present. + files, err = os.ReadDir(db.head.wbl.Dir()) + require.NoError(t, err) + require.Len(t, files, 1) + require.Equal(t, "00000001", files[0].Name()) + f, err = files[0].Info() + require.NoError(t, err) + require.Equal(t, int64(0), f.Size()) + + // OOO stuff should not be present in the Head now. + checkEmptyOOOChunk(series1) + checkEmptyOOOChunk(series2) + + verifyBlockSamples := func(block *Block, fromMins, toMins int64) { + var series1Samples, series2Samples []chunks.Sample + + for _, s := range series1ExpSamplesPostCompact { + if s.T() >= fromMins*time.Minute.Milliseconds() { + // Samples should be sorted, so break out of loop when we reach a timestamp that's too big. + if s.T() > toMins*time.Minute.Milliseconds() { + break + } + series1Samples = append(series1Samples, s) + } + } + for _, s := range series2ExpSamplesPostCompact { + if s.T() >= fromMins*time.Minute.Milliseconds() { + // Samples should be sorted, so break out of loop when we reach a timestamp that's too big. 
+ if s.T() > toMins*time.Minute.Milliseconds() { + break + } + series2Samples = append(series2Samples, s) + } + } + + expRes := map[string][]chunks.Sample{} + if len(series1Samples) != 0 { + expRes[series1.String()] = series1Samples + } + if len(series2Samples) != 0 { + expRes[series2.String()] = series2Samples + } + + q, err := NewBlockQuerier(block, math.MinInt64, math.MaxInt64) + require.NoError(t, err) + + actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*")) + requireEqualSeries(t, expRes, actRes, false) + } + + // Checking for expected data in the blocks. + verifyBlockSamples(db.Blocks()[0], 100, 119) + verifyBlockSamples(db.Blocks()[1], 120, 239) + verifyBlockSamples(db.Blocks()[2], 240, 359) + verifyBlockSamples(db.Blocks()[3], 360, 479) + verifyBlockSamples(db.Blocks()[4], 480, 509) + + // There should be a single m-map file. + mmapDir := mmappedChunksDir(db.head.opts.ChunkDirRoot) + files, err = os.ReadDir(mmapDir) + require.NoError(t, err) + require.Len(t, files, 1) + + // Compact the in-order head and expect another block. + // Since this is a forced compaction, this block is not aligned with 2h. + err = db.CompactHead(NewRangeHead(db.head, 500*time.Minute.Milliseconds(), 550*time.Minute.Milliseconds())) + require.NoError(t, err) + require.Len(t, db.Blocks(), 6) + verifyBlockSamples(db.Blocks()[5], 520, 520) + + // Blocks created out of normal and OOO head now. But not merged. + verifyDBSamples(series1ExpSamplesPostCompact, series2ExpSamplesPostCompact) + + // The compaction also clears out the old m-map files. Including + // the file that has ooo chunks. + files, err = os.ReadDir(mmapDir) + require.NoError(t, err) + require.Len(t, files, 1) + require.Equal(t, "000001", files[0].Name()) + + // This will merge overlapping block. + require.NoError(t, db.Compact(ctx)) + + require.Len(t, db.Blocks(), 5) + verifyBlockSamples(db.Blocks()[0], 100, 119) + verifyBlockSamples(db.Blocks()[1], 120, 239) + verifyBlockSamples(db.Blocks()[2], 240, 359) + verifyBlockSamples(db.Blocks()[3], 360, 479) + verifyBlockSamples(db.Blocks()[4], 480, 520) // Merged block. + + // Final state. Blocks from normal and OOO head are merged. + verifyDBSamples(series1ExpSamplesPostCompact, series2ExpSamplesPostCompact) + } +} + +func copyWithCounterReset(s sample, hint histogram.CounterResetHint) sample { + if s.h != nil { + h := s.h.Copy() + h.CounterResetHint = hint + return sample{t: s.t, h: h} + } + + h := s.fh.Copy() + h.CounterResetHint = hint + return sample{t: s.t, fh: h} +} + func TestOOOCompactionFailure(t *testing.T) { for name, scenario := range sampleTypeScenarios { t.Run(name, func(t *testing.T) { @@ -5880,6 +7037,8 @@ func testOOOCompactionFailure(t *testing.T, scenario sampleTypeScenario) { db, err := Open(dir, nil, nil, opts, nil) require.NoError(t, err) db.DisableCompactions() // We want to manually call it. + db.EnableNativeHistograms() + db.EnableOOONativeHistograms() t.Cleanup(func() { require.NoError(t, db.Close()) }) @@ -5907,7 +7066,7 @@ func testOOOCompactionFailure(t *testing.T, scenario sampleTypeScenario) { // There is a 0th WBL file. verifyFirstWBLFileIs0 := func(count int) { - require.NoError(t, db.head.wbl.Sync()) // syncing to make sure wbl is flushed in windows + require.NoError(t, db.head.wbl.Sync()) // Syncing to make sure wbl is flushed in windows. 
files, err := os.ReadDir(db.head.wbl.Dir()) require.NoError(t, err) require.Len(t, files, count) @@ -5961,7 +7120,7 @@ func testOOOCompactionFailure(t *testing.T, scenario sampleTypeScenario) { require.Len(t, db.Blocks(), 3) require.Equal(t, oldBlocks, db.Blocks()) - // There should be a single m-map file + // There should be a single m-map file. verifyMmapFiles("000001") // All but last WBL file will be deleted. @@ -6057,7 +7216,7 @@ func TestWBLCorruption(t *testing.T) { // should be deleted after replay. // Checking where we corrupt it. - require.NoError(t, db.head.wbl.Sync()) // syncing to make sure wbl is flushed in windows + require.NoError(t, db.head.wbl.Sync()) // Syncing to make sure wbl is flushed in windows. files, err := os.ReadDir(db.head.wbl.Dir()) require.NoError(t, err) require.Len(t, files, 2) @@ -6080,7 +7239,7 @@ func TestWBLCorruption(t *testing.T) { addSamples(310, 320, false) // Verifying that we have data after corruption point. - require.NoError(t, db.head.wbl.Sync()) // syncing to make sure wbl is flushed in windows + require.NoError(t, db.head.wbl.Sync()) // Syncing to make sure wbl is flushed in windows. files, err = os.ReadDir(db.head.wbl.Dir()) require.NoError(t, err) require.Len(t, files, 3) @@ -6167,6 +7326,8 @@ func testOOOMmapCorruption(t *testing.T, scenario sampleTypeScenario) { opts := DefaultOptions() opts.OutOfOrderCapMax = 10 opts.OutOfOrderTimeWindow = 300 * time.Minute.Milliseconds() + opts.EnableNativeHistograms = true + opts.EnableOOONativeHistograms = true db, err := Open(dir, nil, nil, opts, nil) require.NoError(t, err) @@ -6300,6 +7461,8 @@ func testOutOfOrderRuntimeConfig(t *testing.T, scenario sampleTypeScenario) { opts := DefaultOptions() opts.OutOfOrderTimeWindow = oooTimeWindow + opts.EnableNativeHistograms = true + opts.EnableOOONativeHistograms = true db, err := Open(dir, nil, nil, opts, nil) require.NoError(t, err) @@ -6593,6 +7756,8 @@ func testNoGapAfterRestartWithOOO(t *testing.T, scenario sampleTypeScenario) { opts := DefaultOptions() opts.OutOfOrderTimeWindow = 30 * time.Minute.Milliseconds() + opts.EnableNativeHistograms = true + opts.EnableOOONativeHistograms = true db, err := Open(dir, nil, nil, opts, nil) require.NoError(t, err) @@ -6651,6 +7816,8 @@ func testWblReplayAfterOOODisableAndRestart(t *testing.T, scenario sampleTypeSce opts := DefaultOptions() opts.OutOfOrderTimeWindow = 60 * time.Minute.Milliseconds() + opts.EnableNativeHistograms = true + opts.EnableOOONativeHistograms = true db, err := Open(dir, nil, nil, opts, nil) require.NoError(t, err) @@ -6718,6 +7885,8 @@ func testPanicOnApplyConfig(t *testing.T, scenario sampleTypeScenario) { opts := DefaultOptions() opts.OutOfOrderTimeWindow = 60 * time.Minute.Milliseconds() + opts.EnableNativeHistograms = true + opts.EnableOOONativeHistograms = true db, err := Open(dir, nil, nil, opts, nil) require.NoError(t, err) @@ -6775,6 +7944,8 @@ func testDiskFillingUpAfterDisablingOOO(t *testing.T, scenario sampleTypeScenari opts := DefaultOptions() opts.OutOfOrderTimeWindow = 60 * time.Minute.Milliseconds() + opts.EnableNativeHistograms = true + opts.EnableOOONativeHistograms = true db, err := Open(dir, nil, nil, opts, nil) require.NoError(t, err) @@ -7147,28 +8318,16 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) { createBlock(t, db.Dir(), series) for _, s := range series { - key := s.Labels().String() + lbls := s.Labels().String() + slice := exp[lbls] it = s.Iterator(it) - slice := exp[key] - for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() { - 
switch typ { - case chunkenc.ValFloat: - ts, v := it.At() - slice = append(slice, sample{t: ts, f: v}) - case chunkenc.ValHistogram: - ts, h := it.AtHistogram(nil) - slice = append(slice, sample{t: ts, h: h}) - case chunkenc.ValFloatHistogram: - ts, h := it.AtFloatHistogram(nil) - slice = append(slice, sample{t: ts, fh: h}) - default: - t.Fatalf("unexpected sample value type %d", typ) - } - } + smpls, err := storage.ExpandSamples(it, nil) + require.NoError(t, err) + slice = append(slice, smpls...) sort.Slice(slice, func(i, j int) bool { return slice[i].T() < slice[j].T() }) - exp[key] = slice + exp[lbls] = slice } } @@ -7201,10 +8360,10 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) { // due to origin from different overlapping chunks anymore. for _, ss := range exp { for i, s := range ss[1:] { - if s.H() != nil && ss[i].H() != nil && s.H().CounterResetHint == histogram.UnknownCounterReset { + if s.Type() == chunkenc.ValHistogram && ss[i].Type() == chunkenc.ValHistogram && s.H().CounterResetHint == histogram.UnknownCounterReset { s.H().CounterResetHint = histogram.NotCounterReset } - if s.FH() != nil && ss[i].FH() != nil && s.FH().CounterResetHint == histogram.UnknownCounterReset { + if s.Type() == chunkenc.ValFloatHistogram && ss[i].Type() == chunkenc.ValFloatHistogram && s.FH().CounterResetHint == histogram.UnknownCounterReset { s.FH().CounterResetHint = histogram.NotCounterReset } } @@ -7328,6 +8487,112 @@ func TestNativeHistogramFlag(t *testing.T) { }, act) } +func TestOOONativeHistogramFlag(t *testing.T) { + h := &histogram.Histogram{ + Count: 9, + ZeroCount: 4, + ZeroThreshold: 0.001, + Sum: 35.5, + Schema: 1, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 2, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + } + + l := labels.FromStrings("foo", "bar") + + t.Run("Test OOO native histograms if OOO is disabled", func(t *testing.T) { + opts := DefaultOptions() + opts.OutOfOrderTimeWindow = 0 + db := openTestDB(t, opts, []int64{100}) + defer func() { + require.NoError(t, db.Close()) + }() + + // Enable Native Histograms and OOO Native Histogram ingestion + db.EnableNativeHistograms() + db.EnableOOONativeHistograms() + + app := db.Appender(context.Background()) + _, err := app.AppendHistogram(0, l, 100, h, nil) + require.NoError(t, err) + + _, err = app.AppendHistogram(0, l, 50, h, nil) + require.NoError(t, err) // The OOO sample is not detected until it is committed, so no error is returned + + require.NoError(t, app.Commit()) + + q, err := db.Querier(math.MinInt, math.MaxInt64) + require.NoError(t, err) + act := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) + require.Equal(t, map[string][]chunks.Sample{ + l.String(): {sample{t: 100, h: h}}, + }, act) + }) + t.Run("Test OOO Native Histograms if Native Histograms are disabled", func(t *testing.T) { + opts := DefaultOptions() + opts.OutOfOrderTimeWindow = 100 + db := openTestDB(t, opts, []int64{100}) + defer func() { + require.NoError(t, db.Close()) + }() + + // Disable Native Histograms and enable OOO Native Histogram ingestion + db.DisableNativeHistograms() + db.EnableOOONativeHistograms() + + // Attempt to add an in-order sample + app := db.Appender(context.Background()) + _, err := app.AppendHistogram(0, l, 200, h, nil) + require.Equal(t, storage.ErrNativeHistogramsDisabled, err) + + // Attempt to add an OOO sample + _, err = app.AppendHistogram(0, l, 100, h, nil) + require.Equal(t, storage.ErrNativeHistogramsDisabled, err) + + require.NoError(t, app.Commit()) + 
+ q, err := db.Querier(math.MinInt, math.MaxInt64) + require.NoError(t, err) + act := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) + require.Equal(t, map[string][]chunks.Sample{}, act) + }) + t.Run("Test OOO native histograms when flag is enabled", func(t *testing.T) { + opts := DefaultOptions() + opts.OutOfOrderTimeWindow = 100 + db := openTestDB(t, opts, []int64{100}) + defer func() { + require.NoError(t, db.Close()) + }() + + // Enable Native Histograms and OOO Native Histogram ingestion + db.EnableNativeHistograms() + db.EnableOOONativeHistograms() + + // Add in-order samples + app := db.Appender(context.Background()) + _, err := app.AppendHistogram(0, l, 200, h, nil) + require.NoError(t, err) + + // Add OOO samples + _, err = app.AppendHistogram(0, l, 100, h, nil) + require.NoError(t, err) + _, err = app.AppendHistogram(0, l, 150, h, nil) + require.NoError(t, err) + + require.NoError(t, app.Commit()) + + q, err := db.Querier(math.MinInt, math.MaxInt64) + require.NoError(t, err) + act := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) + requireEqualSeries(t, map[string][]chunks.Sample{ + l.String(): {sample{t: 100, h: h}, sample{t: 150, h: h}, sample{t: 200, h: h}}, + }, act, true) + }) +} + // compareSeries essentially replaces `require.Equal(t, expected, actual) in // situations where the actual series might contain more counter reset hints // "unknown" than the expected series. This can easily happen for long series @@ -7343,29 +8608,47 @@ func compareSeries(t require.TestingT, expected, actual map[string][]chunks.Samp // package. require.Equal(t, expected, actual, "number of series differs") } - for key, eSamples := range expected { - aSamples, ok := actual[key] + for key, expSamples := range expected { + actSamples, ok := actual[key] if !ok { require.Equal(t, expected, actual, "expected series %q not found", key) } - if len(eSamples) != len(aSamples) { - require.Equal(t, eSamples, aSamples, "number of samples for series %q differs", key) - } - for i, eS := range eSamples { - aS := aSamples[i] - aH, eH := aS.H(), eS.H() - aFH, eFH := aS.FH(), eS.FH() - switch { - case aH != nil && eH != nil && aH.CounterResetHint == histogram.UnknownCounterReset && eH.CounterResetHint != histogram.GaugeType: - eH = eH.Copy() - eH.CounterResetHint = histogram.UnknownCounterReset - eS = sample{t: eS.T(), h: eH} - case aFH != nil && eFH != nil && aFH.CounterResetHint == histogram.UnknownCounterReset && eFH.CounterResetHint != histogram.GaugeType: - eFH = eFH.Copy() - eFH.CounterResetHint = histogram.UnknownCounterReset - eS = sample{t: eS.T(), fh: eFH} + if len(expSamples) != len(actSamples) { + require.Equal(t, expSamples, actSamples, "number of samples for series %q differs", key) + } + + for i, eS := range expSamples { + aS := actSamples[i] + + // Must use the interface as Equal does not work when actual types differ + // not only does the type differ, but chunk.Sample.FH() interface may auto convert from chunk.Sample.H()! 
+ require.Equal(t, eS.T(), aS.T(), "timestamp of sample %d in series %q differs", i, key) + + require.Equal(t, eS.Type(), aS.Type(), "type of sample %d in series %q differs", i, key) + + switch eS.Type() { + case chunkenc.ValFloat: + require.Equal(t, eS.F(), aS.F(), "sample %d in series %q differs", i, key) + case chunkenc.ValHistogram: + eH, aH := eS.H(), aS.H() + if aH.CounterResetHint == histogram.UnknownCounterReset && eH.CounterResetHint != histogram.GaugeType { + eH = eH.Copy() + // It is always safe to set the counter reset hint to UnknownCounterReset + eH.CounterResetHint = histogram.UnknownCounterReset + eS = sample{t: eS.T(), h: eH} + } + require.Equal(t, eH, aH, "histogram sample %d in series %q differs", i, key) + + case chunkenc.ValFloatHistogram: + eFH, aFH := eS.FH(), aS.FH() + if aFH.CounterResetHint == histogram.UnknownCounterReset && eFH.CounterResetHint != histogram.GaugeType { + eFH = eFH.Copy() + // It is always safe to set the counter reset hint to UnknownCounterReset + eFH.CounterResetHint = histogram.UnknownCounterReset + eS = sample{t: eS.T(), fh: eFH} + } + require.Equal(t, eFH, aFH, "float histogram sample %d in series %q differs", i, key) } - require.Equal(t, eS, aS, "sample %d in series %q differs", i, key) } } } @@ -7468,7 +8751,7 @@ func TestAbortBlockCompactions(t *testing.T) { defer func() { require.NoError(t, db.Close()) }() - // It should NOT be compactible at the beginning of the test + // It should NOT be compactable at the beginning of the test require.False(t, db.head.compactable(), "head should NOT be compactable") // Track the number of compactions run inside db.compactBlocks() @@ -7478,7 +8761,7 @@ func TestAbortBlockCompactions(t *testing.T) { db.compactor = &mockCompactorFn{ planFn: func() ([]string, error) { // On every Plan() run increment compactions. After 4 compactions - // update HEAD to make it compactible to force an exit from db.compactBlocks() loop. + // update HEAD to make it compactable to force an exit from db.compactBlocks() loop. compactions++ if compactions > 3 { chunkRange := db.head.chunkRange.Load() @@ -7507,7 +8790,7 @@ func TestNewCompactorFunc(t *testing.T) { opts := DefaultOptions() block1 := ulid.MustNew(1, nil) block2 := ulid.MustNew(2, nil) - opts.NewCompactorFunc = func(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error) { + opts.NewCompactorFunc = func(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error) { return &mockCompactorFn{ planFn: func() ([]string, error) { return []string{block1.String(), block2.String()}, nil @@ -7613,23 +8896,151 @@ func TestBlockQuerierAndBlockChunkQuerier(t *testing.T) { } func TestGenerateCompactionDelay(t *testing.T) { - assertDelay := func(delay time.Duration) { + assertDelay := func(delay time.Duration, expectedMaxPercentDelay int) { t.Helper() require.GreaterOrEqual(t, delay, time.Duration(0)) - // Less than 10% of the chunkRange.
- require.LessOrEqual(t, delay, 6000*time.Millisecond) + // Expect to generate a delay up to MaxPercentDelay of the head chunk range + require.LessOrEqual(t, delay, (time.Duration(60000*expectedMaxPercentDelay/100) * time.Millisecond)) } opts := DefaultOptions() + cases := []struct { + compactionDelayPercent int + }{ + { + compactionDelayPercent: 1, + }, + { + compactionDelayPercent: 10, + }, + { + compactionDelayPercent: 60, + }, + { + compactionDelayPercent: 100, + }, + } + opts.EnableDelayedCompaction = true - db := openTestDB(t, opts, []int64{60000}) - defer func() { - require.NoError(t, db.Close()) + + for _, c := range cases { + opts.CompactionDelayMaxPercent = c.compactionDelayPercent + db := openTestDB(t, opts, []int64{60000}) + defer func() { + require.NoError(t, db.Close()) + }() + // The offset is generated and changed while opening. + assertDelay(db.opts.CompactionDelay, c.compactionDelayPercent) + + for i := 0; i < 1000; i++ { + assertDelay(db.generateCompactionDelay(), c.compactionDelayPercent) + } + } +} + +type blockedResponseRecorder struct { + r *httptest.ResponseRecorder + + // writeblocked is used to block writing until the test wants it to resume. + writeBlocked chan struct{} + // writeStarted is closed by blockedResponseRecorder to signal that writing has started. + writeStarted chan struct{} +} + +func (br *blockedResponseRecorder) Write(buf []byte) (int, error) { + select { + case <-br.writeStarted: + default: + close(br.writeStarted) + } + + <-br.writeBlocked + return br.r.Write(buf) +} + +func (br *blockedResponseRecorder) Header() http.Header { return br.r.Header() } + +func (br *blockedResponseRecorder) WriteHeader(code int) { br.r.WriteHeader(code) } + +func (br *blockedResponseRecorder) Flush() { br.r.Flush() } + +// TestBlockClosingBlockedDuringRemoteRead ensures that a TSDB Block is not closed while it is being queried +// through remote read. This is a regression test for https://github.com/prometheus/prometheus/issues/14422. +// TODO: Ideally, this should reside in storage/remote/read_handler_test.go once the necessary TSDB utils are accessible there. +func TestBlockClosingBlockedDuringRemoteRead(t *testing.T) { + dir := t.TempDir() + + createBlock(t, dir, genSeries(2, 1, 0, 10)) + db, err := Open(dir, nil, nil, nil, nil) + require.NoError(t, err) + // No error checking as manually closing the block is supposed to make this fail. + defer db.Close() + + readAPI := remote.NewReadHandler(nil, nil, db, func() config.Config { + return config.Config{} + }, + 0, 1, 0, + ) + + matcher, err := labels.NewMatcher(labels.MatchRegexp, "__name__", ".*") + require.NoError(t, err) + + query, err := remote.ToQuery(0, 10, []*labels.Matcher{matcher}, nil) + require.NoError(t, err) + + req := &prompb.ReadRequest{ + Queries: []*prompb.Query{query}, + AcceptedResponseTypes: []prompb.ReadRequest_ResponseType{prompb.ReadRequest_STREAMED_XOR_CHUNKS}, + } + data, err := proto.Marshal(req) + require.NoError(t, err) + + request, err := http.NewRequest(http.MethodPost, "", bytes.NewBuffer(snappy.Encode(nil, data))) + require.NoError(t, err) + + blockedRecorder := &blockedResponseRecorder{ + r: httptest.NewRecorder(), + writeBlocked: make(chan struct{}), + writeStarted: make(chan struct{}), + } + + readDone := make(chan struct{}) + go func() { + readAPI.ServeHTTP(blockedRecorder, request) + require.Equal(t, http.StatusOK, blockedRecorder.r.Code) + close(readDone) }() - // The offset is generated and changed while opening. 
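For context on the `assertDelay` bound above: with delayed compaction enabled, the generated delay is expected to stay at or below `CompactionDelayMaxPercent` percent of the head chunk range, so for the 60000 ms chunk range used in the test a 10% setting caps the delay at 6000 ms. A small sketch of that arithmetic, nothing beyond what the assertion encodes:

```go
package main

import (
	"fmt"
	"time"
)

// maxCompactionDelay computes the upper bound the test asserts:
// at most maxPercent percent of the head chunk range.
func maxCompactionDelay(chunkRange time.Duration, maxPercent int) time.Duration {
	return chunkRange * time.Duration(maxPercent) / 100
}

func main() {
	chunkRange := 60000 * time.Millisecond
	for _, p := range []int{1, 10, 60, 100} {
		fmt.Printf("%3d%% of %v -> delay <= %v\n", p, chunkRange, maxCompactionDelay(chunkRange, p))
	}
}
```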
- assertDelay(db.opts.CompactionDelay) - for i := 0; i < 1000; i++ { - assertDelay(db.generateCompactionDelay()) + // Wait for the read API to start streaming data. + <-blockedRecorder.writeStarted + + // Try to close the queried block. + blockClosed := make(chan struct{}) + go func() { + for _, block := range db.Blocks() { + block.Close() + } + close(blockClosed) + }() + + // Closing the queried block should block. + // Wait a little bit to make sure of that. + select { + case <-time.After(100 * time.Millisecond): + case <-readDone: + require.Fail(t, "read API should still be streaming data.") + case <-blockClosed: + require.Fail(t, "Block shouldn't get closed while being queried.") + } + + // Resume the read API data streaming. + close(blockedRecorder.writeBlocked) + <-readDone + + // The block should be no longer needed and closing it should end. + select { + case <-time.After(10 * time.Millisecond): + require.Fail(t, "Closing the block timed out.") + case <-blockClosed: } } diff --git a/tsdb/docs/format/chunks.md b/tsdb/docs/format/chunks.md index 8318e0a5403..7eb0820e44d 100644 --- a/tsdb/docs/format/chunks.md +++ b/tsdb/docs/format/chunks.md @@ -29,14 +29,15 @@ in-file offset (lower 4 bytes) and segment sequence number (upper 4 bytes). # Chunk ``` -┌───────────────┬───────────────────┬──────────────┬────────────────┐ -│ len │ encoding <1 byte> │ data │ CRC32 <4 byte> │ -└───────────────┴───────────────────┴──────────────┴────────────────┘ +┌───────────────┬───────────────────┬─────────────┬────────────────┐ +│ len │ encoding <1 byte> │ data │ CRC32 <4 byte> │ +└───────────────┴───────────────────┴─────────────┴────────────────┘ ``` Notes: * `` has 1 to 10 bytes. -* `encoding`: Currently either `XOR` or `histogram`. +* `encoding`: Currently either `XOR`, `histogram`, or `floathistogram`, see + [code for numerical values](https://github.com/prometheus/prometheus/blob/02d0de9987ad99dee5de21853715954fadb3239f/tsdb/chunkenc/chunk.go#L28-L47). * `data`: See below for each encoding. ## XOR chunk data @@ -67,9 +68,9 @@ Notes: ## Histogram chunk data ``` -┌──────────────────────┬──────────────────────────┬───────────────────────────────┬─────────────────────┬──────────────────┬──────────────────┬────────────────┬──────────────────┐ -│ num_samples │ histogram_flags <1 byte> │ zero_threshold <1 or 9 bytes> │ schema │ pos_spans │ neg_spans │ samples │ padding │ -└──────────────────────┴──────────────────────────┴───────────────────────────────┴─────────────────────┴──────────────────┴──────────────────┴────────────────┴──────────────────┘ +┌──────────────────────┬──────────────────────────┬───────────────────────────────┬─────────────────────┬──────────────────┬──────────────────┬──────────────────────┬────────────────┬──────────────────┐ +│ num_samples │ histogram_flags <1 byte> │ zero_threshold <1 or 9 bytes> │ schema │ pos_spans │ neg_spans │ custom_values │ samples │ padding │ +└──────────────────────┴──────────────────────────┴───────────────────────────────┴─────────────────────┴──────────────────┴──────────────────┴──────────────────────┴────────────────┴──────────────────┘ ``` ### Positive and negative spans data: @@ -80,6 +81,16 @@ Notes: └─────────────────────────┴────────────────────────┴───────────────────────┴────────────────────────┴───────────────────────┴─────┴────────────────────────┴───────────────────────┘ ``` +### Custom values data: + +The `custom_values` data is currently only used for schema -53 (custom bucket boundaries). For other schemas, it is empty (length of zero). 
+ +``` +┌──────────────────────────┬──────────────────┬──────────────────┬─────┬──────────────────┐ +│ num_values │ value_0 │ value_1 │ ... │ value_n │ +└──────────────────────────┴─────────────────────────────────────┴─────┴──────────────────┘ +``` + ### Samples data: ``` @@ -92,7 +103,7 @@ Notes: ├──────────────────────────┤ │ ... │ ├──────────────────────────┤ -│ Sample_n │ +│ sample_n │ └──────────────────────────┘ ``` @@ -107,9 +118,9 @@ Notes: #### Sample 1 data: ``` -┌────────────────────────┬───────────────────────────┬────────────────────────────────┬──────────────────────┬─────────────────────────────────┬─────┬─────────────────────────────────┬─────────────────────────────────┬─────┬─────────────────────────────────┐ -│ ts_delta │ count_delta │ zero_count_delta │ sum_xor │ pos_bucket_0_delta │ ... │ pos_bucket_n_delta │ neg_bucket_0_delta │ ... │ neg_bucket_n_delta │ -└────────────────────────┴───────────────────────────┴────────────────────────────────┴──────────────────────┴─────────────────────────────────┴─────┴─────────────────────────────────┴─────────────────────────────────┴─────┴─────────────────────────────────┘ +┌───────────────────────┬──────────────────────────┬───────────────────────────────┬──────────────────────┬─────────────────────────────────┬─────┬─────────────────────────────────┬─────────────────────────────────┬─────┬─────────────────────────────────┐ +│ ts_delta │ count_delta │ zero_count_delta │ sum_xor │ pos_bucket_0_delta │ ... │ pos_bucket_n_delta │ neg_bucket_0_delta │ ... │ neg_bucket_n_delta │ +└───────────────────────┴──────────────────────────┴───────────────────────────────┴──────────────────────┴─────────────────────────────────┴─────┴─────────────────────────────────┴─────────────────────────────────┴─────┴─────────────────────────────────┘ ``` #### Sample 2 data and following: @@ -131,7 +142,9 @@ Notes: * If 0, it is a single zero byte. * If a power of two between 2^-243 and 2^10, it is a single byte between 1 and 254. * Otherwise, it is a byte with all bits set (255), followed by a float64, resulting in 9 bytes length. -* `schema` is a specific value defined by the exposition format. Currently valid values are -4 <= n <= 8. +* `schema` is a specific value defined by the exposition format. Currently + valid values are either -4 <= n <= 8 (standard exponential schemas) or -53 + (custom bucket boundaries). * `` is a variable bitwidth encoding for signed integers, optimized for “delta of deltas” of bucket deltas. It has between 1 bit and 9 bytes. See [code for details](https://github.com/prometheus/prometheus/blob/8c1507ebaa4ca552958ffb60c2d1b21afb7150e4/tsdb/chunkenc/varbit.go#L31-L60). * `` is a variable bitwidth encoding for unsigned integers with the same bit-bucketing as ``. @@ -142,3 +155,69 @@ Notes: * Note that buckets are inherently deltas between the current bucket and the previous bucket. Only `bucket_0` is an absolute count. * The chunk can have as few as one sample, i.e. sample 1 and following are optional. * Similarly, there could be down to zero spans and down to zero buckets. + +The `` encoding within the custom values data depends on the schema. +For schema -53 (custom bucket boundaries, currently the only use case for +custom values), the values to encode are bucket boundaries in the form of +floats. The encoding of a given float value _x_ works as follows: + +1. Create an intermediate value _y_ = _x_ * 1000. +2. If 0 ≤ _y_ ≤ 33554430 _and_ if the decimal value of _y_ is integer, store + _y_ + 1 as ``. +3. 
Otherwise, store a 0 bit, followed by the 64 bit of the original _x_ + encoded as plain ``. + +Note that values stored as per (2) will always start with a 1 bit, which allow +decoders to recognize this case in contrast to values stores as per (3), which +always start with a 0 bit. + +The rational behind this encoding is that most custom bucket boundaries are set +by humans as decimal numbers with not very many decimal places. In most cases, +the encoding will therefore result in a short varbit representation. The upper +bound of 33554430 is picked so that the varbit encoded value will take at most +4 bytes. + + +## Float histogram chunk data + +Float histograms have the same layout as histograms apart from the encoding of samples. + +### Samples data: + +``` +┌──────────────────────────┐ +│ sample_0 │ +├──────────────────────────┤ +│ sample_1 │ +├──────────────────────────┤ +│ sample_2 │ +├──────────────────────────┤ +│ ... │ +├──────────────────────────┤ +│ sample_n │ +└──────────────────────────┘ +``` + +#### Sample 0 data: + +``` +┌─────────────────┬─────────────────┬──────────────────────┬───────────────┬────────────────────────┬─────┬────────────────────────┬────────────────────────┬─────┬────────────────────────┐ +│ ts │ count │ zero_count │ sum │ pos_bucket_0 │ ... │ pos_bucket_n │ neg_bucket_0 │ ... │ neg_bucket_n │ +└─────────────────┴─────────────────┴──────────────────────┴───────────────┴────────────────────────┴─────┴────────────────────────┴────────────────────────┴─────┴────────────────────────┘ +``` + +#### Sample 1 data: + +``` +┌───────────────────────┬────────────────────────┬─────────────────────────────┬──────────────────────┬───────────────────────────────┬─────┬───────────────────────────────┬───────────────────────────────┬─────┬───────────────────────────────┐ +│ ts_delta │ count_xor │ zero_count_xor │ sum_xor │ pos_bucket_0_xor │ ... │ pos_bucket_n_xor │ neg_bucket_0_xor │ ... │ neg_bucket_n_xor │ +└───────────────────────┴────────────────────────┴─────────────────────────────┴──────────────────────┴───────────────────────────────┴─────┴───────────────────────────────┴───────────────────────────────┴─────┴───────────────────────────────┘ +``` + +#### Sample 2 data and following: + +``` +┌─────────────────────┬────────────────────────┬─────────────────────────────┬──────────────────────┬───────────────────────────────┬─────┬───────────────────────────────┬───────────────────────────────┬─────┬───────────────────────────────┐ +│ ts_dod │ count_xor │ zero_count_xor │ sum_xor │ pos_bucket_0_xor │ ... │ pos_bucket_n_xor │ neg_bucket_0_xor │ ... │ neg_bucket_n_xor │ +└─────────────────────┴────────────────────────┴─────────────────────────────┴──────────────────────┴───────────────────────────────┴─────┴───────────────────────────────┴───────────────────────────────┴─────┴───────────────────────────────┘ +``` diff --git a/tsdb/docs/format/head_chunks.md b/tsdb/docs/format/head_chunks.md index 5737f420589..7040dcf41ad 100644 --- a/tsdb/docs/format/head_chunks.md +++ b/tsdb/docs/format/head_chunks.md @@ -37,3 +37,7 @@ is used while replaying the chunks. | series ref <8 byte> | mint <8 byte, uint64> | maxt <8 byte, uint64> | encoding <1 byte> | len | data │ CRC32 <4 byte> │ └─────────────────────┴───────────────────────┴───────────────────────┴───────────────────┴───────────────┴──────────────┴────────────────┘ ``` + +## OOO encoding + +Head chunks use the highest bit of the `encoding` field to indicate whether it is out-of-order (1) or not (0). 
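To make the custom-values encoding rule above concrete, here is a minimal sketch of the encode-side decision for a single bucket boundary. It only models the split between the two documented cases; the varbit bit packing itself and the names used here are illustrative, not the actual `tsdb/chunkenc` implementation.

```go
package main

import (
	"fmt"
	"math"
)

// encodedForm reports which of the two documented cases applies to a custom
// bucket boundary x. It only models the case split; the actual varbit bit
// packing is not reproduced here.
func encodedForm(x float64) string {
	y := x * 1000
	if y >= 0 && y <= 33554430 && y == math.Trunc(y) {
		// Case 2: store y+1 as a varbit unsigned integer. Adding 1 keeps the
		// stored value non-zero, which (per the note above) makes it start
		// with a 1 bit.
		return fmt.Sprintf("varbit uint %d", uint64(y)+1)
	}
	// Case 3: a 0 bit, then the plain 64-bit float.
	return fmt.Sprintf("0 bit + float64 bits 0x%016x", math.Float64bits(x))
}

func main() {
	for _, x := range []float64{0.25, 10, 0.0001, -1} {
		fmt.Printf("%g -> %s\n", x, encodedForm(x))
	}
}
```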
This bit is not set for chunks in the on-disk blocks. diff --git a/tsdb/encoding/encoding.go b/tsdb/encoding/encoding.go index 88fdd30c850..cc7d0990f6a 100644 --- a/tsdb/encoding/encoding.go +++ b/tsdb/encoding/encoding.go @@ -20,7 +20,6 @@ import ( "hash" "hash/crc32" "math" - "unsafe" "github.com/dennwc/varint" ) @@ -75,8 +74,7 @@ func (e *Encbuf) PutVarint64(x int64) { // PutUvarintStr writes a string to the buffer prefixed by its varint length (in bytes!). func (e *Encbuf) PutUvarintStr(s string) { - b := *(*[]byte)(unsafe.Pointer(&s)) - e.PutUvarint(len(b)) + e.PutUvarint(len(s)) e.PutString(s) } @@ -201,8 +199,9 @@ func (d *Decbuf) UvarintStr() string { return string(d.UvarintBytes()) } -// UvarintBytes returns invalid values if the byte slice goes away. -// Compared to UvarintStr, it avoid allocations. +// UvarintBytes returns a pointer to internal data; +// the return value becomes invalid if the byte slice goes away. +// Compared to UvarintStr, this avoids allocations. func (d *Decbuf) UvarintBytes() []byte { l := d.Uvarint64() if d.E != nil { diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index 7545ab9a602..31d461bed9e 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -29,7 +29,7 @@ import ( ) const ( - // Indicates that there is no index entry for an exmplar. + // Indicates that there is no index entry for an exemplar. noExemplar = -1 // Estimated number of exemplars per series, for sizing the index. estimatedExemplarsPerSeries = 16 @@ -152,13 +152,13 @@ func (ce *CircularExemplarStorage) Querier(_ context.Context) (storage.ExemplarQ func (ce *CircularExemplarStorage) Select(start, end int64, matchers ...[]*labels.Matcher) ([]exemplar.QueryResult, error) { ret := make([]exemplar.QueryResult, 0) + ce.lock.RLock() + defer ce.lock.RUnlock() + if len(ce.exemplars) == 0 { return ret, nil } - ce.lock.RLock() - defer ce.lock.RUnlock() - // Loop through each index entry, which will point us to first/last exemplar for each series. for _, idx := range ce.index { var se exemplar.QueryResult @@ -281,13 +281,13 @@ func (ce *CircularExemplarStorage) Resize(l int64) int { l = 0 } + ce.lock.Lock() + defer ce.lock.Unlock() + if l == int64(len(ce.exemplars)) { return 0 } - ce.lock.Lock() - defer ce.lock.Unlock() - oldBuffer := ce.exemplars oldNextIndex := int64(ce.nextIndex) @@ -349,6 +349,11 @@ func (ce *CircularExemplarStorage) migrate(entry *circularBufferEntry, buf []byt } func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemplar) error { + // TODO(bwplotka): This lock can lock all scrapers, there might high contention on this on scale. + // Optimize by moving the lock to be per series (& benchmark it). + ce.lock.Lock() + defer ce.lock.Unlock() + if len(ce.exemplars) == 0 { return storage.ErrExemplarsDisabled } @@ -356,11 +361,6 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp var buf [1024]byte seriesLabels := l.Bytes(buf[:]) - // TODO(bwplotka): This lock can lock all scrapers, there might high contention on this on scale. - // Optimize by moving the lock to be per series (& benchmark it). 
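A brief illustration of the `encoding` byte convention documented above for head chunks: the low bits carry the chunk encoding and the highest bit flags an out-of-order chunk. The mask constant and helper names below are illustrative; the real flag handling lives in the TSDB chunk code.

```go
package main

import "fmt"

// outOfOrderMask is the highest bit of the 1-byte encoding field in head
// chunks, as described above. The constant and helpers are illustrative only.
const outOfOrderMask byte = 0b1000_0000

func markOutOfOrder(enc byte) byte { return enc | outOfOrderMask }
func isOutOfOrder(enc byte) bool   { return enc&outOfOrderMask != 0 }
func baseEncoding(enc byte) byte   { return enc &^ outOfOrderMask }

func main() {
	// Assuming the XOR chunk encoding value of 1 (see the chunkenc code linked
	// from the chunks format document).
	const encXOR byte = 1
	ooo := markOutOfOrder(encXOR)
	fmt.Printf("encoding byte %#08b: out-of-order=%v, base encoding=%d\n", ooo, isOutOfOrder(ooo), baseEncoding(ooo))
}
```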
- ce.lock.Lock() - defer ce.lock.Unlock() - idx, ok := ce.index[string(seriesLabels)] err := ce.validateExemplar(idx, e, true) if err != nil { diff --git a/tsdb/exemplar_test.go b/tsdb/exemplar_test.go index 7723ec38942..dbd34cc48c4 100644 --- a/tsdb/exemplar_test.go +++ b/tsdb/exemplar_test.go @@ -20,6 +20,7 @@ import ( "reflect" "strconv" "strings" + "sync" "testing" "github.com/prometheus/client_golang/prometheus" @@ -499,3 +500,40 @@ func BenchmarkResizeExemplars(b *testing.B) { }) } } + +// TestCircularExemplarStorage_Concurrent_AddExemplar_Resize tries to provoke a data race between AddExemplar and Resize. +// Run with race detection enabled. +func TestCircularExemplarStorage_Concurrent_AddExemplar_Resize(t *testing.T) { + exs, err := NewCircularExemplarStorage(0, eMetrics) + require.NoError(t, err) + es := exs.(*CircularExemplarStorage) + + l := labels.FromStrings("service", "asdf") + e := exemplar.Exemplar{ + Labels: labels.FromStrings("trace_id", "qwerty"), + Value: 0.1, + Ts: 1, + } + + var wg sync.WaitGroup + wg.Add(1) + t.Cleanup(wg.Wait) + + started := make(chan struct{}) + + go func() { + defer wg.Done() + + <-started + for i := 0; i < 100; i++ { + require.NoError(t, es.AddExemplar(l, e)) + } + }() + + for i := 0; i < 100; i++ { + es.Resize(int64(i + 1)) + if i == 0 { + close(started) + } + } +} diff --git a/tsdb/head.go b/tsdb/head.go index 4ff7aab6322..c67c438e525 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" "io" + "log/slog" "math" "path/filepath" "runtime" @@ -25,12 +26,11 @@ import ( "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/oklog/ulid" "go.uber.org/atomic" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" @@ -84,7 +84,7 @@ type Head struct { wal, wbl *wlog.WL exemplarMetrics *ExemplarMetrics exemplars ExemplarStorage - logger log.Logger + logger *slog.Logger appendPool zeropool.Pool[[]record.RefSample] exemplarsPool zeropool.Pool[[]exemplarWithSeriesRef] histogramsPool zeropool.Pool[[]record.RefHistogramSample] @@ -150,9 +150,10 @@ type HeadOptions struct { // EnableNativeHistograms enables the ingestion of native histograms. EnableNativeHistograms atomic.Bool - // EnableCreatedTimestampZeroIngestion enables the ingestion of the created timestamp as a synthetic zero sample. - // See: https://github.com/prometheus/proposals/blob/main/proposals/2023-06-13_created-timestamp.md - EnableCreatedTimestampZeroIngestion bool + // EnableOOONativeHistograms enables the ingestion of OOO native histograms. + // It will only take effect if EnableNativeHistograms is set to true and the + // OutOfOrderTimeWindow is > 0 + EnableOOONativeHistograms atomic.Bool ChunkRange int64 // ChunkDirRoot is the parent directory of the chunks directory. @@ -222,10 +223,10 @@ type SeriesLifecycleCallback interface { } // NewHead opens the head block in dir. 
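Since the new `EnableOOONativeHistograms` option only takes effect when native histograms are enabled and an out-of-order window is configured, a minimal sketch of wiring the three switches together may help. It follows the pattern of the tests earlier in this patch; the data directory and window size are illustrative, and this is not a complete TSDB setup.

```go
package main

import (
	"log"
	"time"

	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	opts := tsdb.DefaultOptions()
	// An out-of-order window > 0 is required; native histograms and OOO
	// native histogram ingestion are separate switches on top of that.
	opts.OutOfOrderTimeWindow = 5 * time.Minute.Milliseconds()

	db, err := tsdb.Open("data/", nil, nil, opts, nil) // path is illustrative
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	db.EnableNativeHistograms()
	db.EnableOOONativeHistograms()
}
```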
-func NewHead(r prometheus.Registerer, l log.Logger, wal, wbl *wlog.WL, opts *HeadOptions, stats *HeadStats) (*Head, error) { +func NewHead(r prometheus.Registerer, l *slog.Logger, wal, wbl *wlog.WL, opts *HeadOptions, stats *HeadStats) (*Head, error) { var err error if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } if opts.OutOfOrderTimeWindow.Load() < 0 { @@ -561,7 +562,7 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { }, func() float64 { val, err := h.chunkDiskMapper.Size() if err != nil { - level.Error(h.logger).Log("msg", "Failed to calculate size of \"chunks_head\" dir", + h.logger.Error("Failed to calculate size of \"chunks_head\" dir", "err", err.Error()) } return float64(val) @@ -624,7 +625,7 @@ func (h *Head) Init(minValidTime int64) error { } }() - level.Info(h.logger).Log("msg", "Replaying on-disk memory mappable chunks if any") + h.logger.Info("Replaying on-disk memory mappable chunks if any") start := time.Now() snapIdx, snapOffset := -1, 0 @@ -633,7 +634,7 @@ func (h *Head) Init(minValidTime int64) error { snapshotLoaded := false var chunkSnapshotLoadDuration time.Duration if h.opts.EnableMemorySnapshotOnShutdown { - level.Info(h.logger).Log("msg", "Chunk snapshot is enabled, replaying from the snapshot") + h.logger.Info("Chunk snapshot is enabled, replaying from the snapshot") // If there are any WAL files, there should be at least one WAL file with an index that is current or newer // than the snapshot index. If the WAL index is behind the snapshot index somehow, the snapshot is assumed // to be outdated. @@ -646,14 +647,14 @@ func (h *Head) Init(minValidTime int64) error { _, idx, _, err := LastChunkSnapshot(h.opts.ChunkDirRoot) if err != nil && !errors.Is(err, record.ErrNotFound) { - level.Error(h.logger).Log("msg", "Could not find last snapshot", "err", err) + h.logger.Error("Could not find last snapshot", "err", err) } if err == nil && endAt < idx { loadSnapshot = false - level.Warn(h.logger).Log("msg", "Last WAL file is behind snapshot, removing snapshots") + h.logger.Warn("Last WAL file is behind snapshot, removing snapshots") if err := DeleteChunkSnapshots(h.opts.ChunkDirRoot, math.MaxInt, math.MaxInt); err != nil { - level.Error(h.logger).Log("msg", "Error while deleting snapshot directories", "err", err) + h.logger.Error("Error while deleting snapshot directories", "err", err) } } } @@ -663,14 +664,14 @@ func (h *Head) Init(minValidTime int64) error { if err == nil { snapshotLoaded = true chunkSnapshotLoadDuration = time.Since(start) - level.Info(h.logger).Log("msg", "Chunk snapshot loading time", "duration", chunkSnapshotLoadDuration.String()) + h.logger.Info("Chunk snapshot loading time", "duration", chunkSnapshotLoadDuration.String()) } if err != nil { snapIdx, snapOffset = -1, 0 refSeries = make(map[chunks.HeadSeriesRef]*memSeries) h.metrics.snapshotReplayErrorTotal.Inc() - level.Error(h.logger).Log("msg", "Failed to load chunk snapshot", "err", err) + h.logger.Error("Failed to load chunk snapshot", "err", err) // We clear the partially loaded data to replay fresh from the WAL. if err := h.resetInMemoryState(); err != nil { return err @@ -694,7 +695,7 @@ func (h *Head) Init(minValidTime int64) error { mmappedChunks, oooMmappedChunks, lastMmapRef, err = h.loadMmappedChunks(refSeries) if err != nil { // TODO(codesome): clear out all m-map chunks here for refSeries. 
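The logging changes throughout this file follow the same mechanical rewrite from go-kit's leveled logger to `log/slog` via `github.com/prometheus/common/promslog`. A compact before/after sketch of the pattern, using messages and fields taken from the surrounding hunks:

```go
package main

import (
	"github.com/prometheus/common/promslog"
)

func main() {
	logger := promslog.NewNopLogger() // replaces go-kit's log.NewNopLogger()

	// Before (go-kit):
	//   level.Info(logger).Log("msg", "WAL segment loaded", "segment", 3, "maxSegment", 7)
	// After (log/slog):
	logger.Info("WAL segment loaded", "segment", 3, "maxSegment", 7)

	// Errors move the same way:
	//   level.Error(logger).Log("msg", "truncating segments failed", "err", err)
	// becomes logger.Error("truncating segments failed", "err", err).
}
```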
- level.Error(h.logger).Log("msg", "Loading on-disk chunks failed", "err", err) + h.logger.Error("Loading on-disk chunks failed", "err", err) var cerr *chunks.CorruptionErr if errors.As(err, &cerr) { h.metrics.mmapChunkCorruptionTotal.Inc() @@ -711,15 +712,15 @@ func (h *Head) Init(minValidTime int64) error { } } mmapChunkReplayDuration = time.Since(mmapChunkReplayStart) - level.Info(h.logger).Log("msg", "On-disk memory mappable chunks replay completed", "duration", mmapChunkReplayDuration.String()) + h.logger.Info("On-disk memory mappable chunks replay completed", "duration", mmapChunkReplayDuration.String()) } if h.wal == nil { - level.Info(h.logger).Log("msg", "WAL not found") + h.logger.Info("WAL not found") return nil } - level.Info(h.logger).Log("msg", "Replaying WAL, this may take a while") + h.logger.Info("Replaying WAL, this may take a while") checkpointReplayStart := time.Now() // Backfill the checkpoint first if it exists. @@ -745,7 +746,7 @@ func (h *Head) Init(minValidTime int64) error { } defer func() { if err := sr.Close(); err != nil { - level.Warn(h.logger).Log("msg", "Error while closing the wal segments reader", "err", err) + h.logger.Warn("Error while closing the wal segments reader", "err", err) } }() @@ -756,7 +757,7 @@ func (h *Head) Init(minValidTime int64) error { } h.updateWALReplayStatusRead(startFrom) startFrom++ - level.Info(h.logger).Log("msg", "WAL checkpoint loaded") + h.logger.Info("WAL checkpoint loaded") } checkpointReplayDuration := time.Since(checkpointReplayStart) @@ -786,12 +787,12 @@ func (h *Head) Init(minValidTime int64) error { } err = h.loadWAL(wlog.NewReader(sr), syms, multiRef, mmappedChunks, oooMmappedChunks) if err := sr.Close(); err != nil { - level.Warn(h.logger).Log("msg", "Error while closing the wal segments reader", "err", err) + h.logger.Warn("Error while closing the wal segments reader", "err", err) } if err != nil { return err } - level.Info(h.logger).Log("msg", "WAL segment loaded", "segment", i, "maxSegment", endAt) + h.logger.Info("WAL segment loaded", "segment", i, "maxSegment", endAt) h.updateWALReplayStatusRead(i) } walReplayDuration := time.Since(walReplayStart) @@ -814,12 +815,12 @@ func (h *Head) Init(minValidTime int64) error { sr := wlog.NewSegmentBufReader(s) err = h.loadWBL(wlog.NewReader(sr), syms, multiRef, lastMmapRef) if err := sr.Close(); err != nil { - level.Warn(h.logger).Log("msg", "Error while closing the wbl segments reader", "err", err) + h.logger.Warn("Error while closing the wbl segments reader", "err", err) } if err != nil { return &errLoadWbl{err} } - level.Info(h.logger).Log("msg", "WBL segment loaded", "segment", i, "maxSegment", endAt) + h.logger.Info("WBL segment loaded", "segment", i, "maxSegment", endAt) h.updateWALReplayStatusRead(i) } } @@ -828,8 +829,8 @@ func (h *Head) Init(minValidTime int64) error { totalReplayDuration := time.Since(start) h.metrics.dataTotalReplayDuration.Set(totalReplayDuration.Seconds()) - level.Info(h.logger).Log( - "msg", "WAL replay completed", + h.logger.Info( + "WAL replay completed", "checkpoint_replay_duration", checkpointReplayDuration.String(), "wal_replay_duration", walReplayDuration.String(), "wbl_replay_duration", wblReplayDuration.String(), @@ -939,28 +940,28 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries) // removeCorruptedMmappedChunks attempts to delete the corrupted mmapped chunks and if it fails, it clears all the previously // loaded mmapped chunks. 
func (h *Head) removeCorruptedMmappedChunks(err error) (map[chunks.HeadSeriesRef][]*mmappedChunk, map[chunks.HeadSeriesRef][]*mmappedChunk, chunks.ChunkDiskMapperRef, error) { - level.Info(h.logger).Log("msg", "Deleting mmapped chunk files") + h.logger.Info("Deleting mmapped chunk files") // We never want to preserve the in-memory series from snapshots if we are repairing m-map chunks. if err := h.resetInMemoryState(); err != nil { return map[chunks.HeadSeriesRef][]*mmappedChunk{}, map[chunks.HeadSeriesRef][]*mmappedChunk{}, 0, err } - level.Info(h.logger).Log("msg", "Deleting mmapped chunk files") + h.logger.Info("Deleting mmapped chunk files") if err := h.chunkDiskMapper.DeleteCorrupted(err); err != nil { - level.Info(h.logger).Log("msg", "Deletion of corrupted mmap chunk files failed, discarding chunk files completely", "err", err) + h.logger.Info("Deletion of corrupted mmap chunk files failed, discarding chunk files completely", "err", err) if err := h.chunkDiskMapper.Truncate(math.MaxUint32); err != nil { - level.Error(h.logger).Log("msg", "Deletion of all mmap chunk files failed", "err", err) + h.logger.Error("Deletion of all mmap chunk files failed", "err", err) } return map[chunks.HeadSeriesRef][]*mmappedChunk{}, map[chunks.HeadSeriesRef][]*mmappedChunk{}, 0, nil } - level.Info(h.logger).Log("msg", "Deletion of mmap chunk files successful, reattempting m-mapping the on-disk chunks") + h.logger.Info("Deletion of mmap chunk files successful, reattempting m-mapping the on-disk chunks") mmappedChunks, oooMmappedChunks, lastRef, err := h.loadMmappedChunks(make(map[chunks.HeadSeriesRef]*memSeries)) if err != nil { - level.Error(h.logger).Log("msg", "Loading on-disk chunks failed, discarding chunk files completely", "err", err) + h.logger.Error("Loading on-disk chunks failed, discarding chunk files completely", "err", err) if err := h.chunkDiskMapper.Truncate(math.MaxUint32); err != nil { - level.Error(h.logger).Log("msg", "Deletion of all mmap chunk files failed after failed loading", "err", err) + h.logger.Error("Deletion of all mmap chunk files failed after failed loading", "err", err) } mmappedChunks = map[chunks.HeadSeriesRef][]*mmappedChunk{} } @@ -995,7 +996,7 @@ func (h *Head) ApplyConfig(cfg *config.Config, wbl *wlog.WL) { } migrated := h.exemplars.(*CircularExemplarStorage).Resize(newSize) - level.Info(h.logger).Log("msg", "Exemplar storage resized", "from", prevSize, "to", newSize, "migrated", migrated) + h.logger.Info("Exemplar storage resized", "from", prevSize, "to", newSize, "migrated", migrated) } // SetOutOfOrderTimeWindow updates the out of order related parameters. @@ -1018,6 +1019,16 @@ func (h *Head) DisableNativeHistograms() { h.opts.EnableNativeHistograms.Store(false) } +// EnableOOONativeHistograms enables the ingestion of out-of-order native histograms. +func (h *Head) EnableOOONativeHistograms() { + h.opts.EnableOOONativeHistograms.Store(true) +} + +// DisableOOONativeHistograms disables the ingestion of out-of-order native histograms. +func (h *Head) DisableOOONativeHistograms() { + h.opts.EnableOOONativeHistograms.Store(false) +} + // PostingsCardinalityStats returns highest cardinality stats by label and value names. func (h *Head) PostingsCardinalityStats(statsByLabelName string, limit int) *index.PostingsStats { cacheKey := statsByLabelName + ";" + strconv.Itoa(limit) @@ -1296,7 +1307,7 @@ func (h *Head) truncateWAL(mint int64) error { // If truncating fails, we'll just try again at the next checkpoint. 
// Leftover segments will just be ignored in the future if there's a checkpoint // that supersedes them. - level.Error(h.logger).Log("msg", "truncating segments failed", "err", err) + h.logger.Error("truncating segments failed", "err", err) } // The checkpoint is written and segments before it is truncated, so we no @@ -1314,12 +1325,12 @@ func (h *Head) truncateWAL(mint int64) error { // Leftover old checkpoints do not cause problems down the line beyond // occupying disk space. // They will just be ignored since a higher checkpoint exists. - level.Error(h.logger).Log("msg", "delete old checkpoints", "err", err) + h.logger.Error("delete old checkpoints", "err", err) h.metrics.checkpointDeleteFail.Inc() } h.metrics.walTruncateDuration.Observe(time.Since(start).Seconds()) - level.Info(h.logger).Log("msg", "WAL checkpoint complete", + h.logger.Info("WAL checkpoint complete", "first", first, "last", last, "duration", time.Since(start)) return nil @@ -1357,7 +1368,7 @@ func (h *Head) truncateSeriesAndChunkDiskMapper(caller string) error { start := time.Now() headMaxt := h.MaxTime() actualMint, minOOOTime, minMmapFile := h.gc() - level.Info(h.logger).Log("msg", "Head GC completed", "caller", caller, "duration", time.Since(start)) + h.logger.Info("Head GC completed", "caller", caller, "duration", time.Since(start)) h.metrics.gcDuration.Observe(time.Since(start).Seconds()) if actualMint > h.minTime.Load() { @@ -1509,7 +1520,7 @@ func (h *Head) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Match series := h.series.getByID(chunks.HeadSeriesRef(p.At())) if series == nil { - level.Debug(h.logger).Log("msg", "Series not found in Head.Delete") + h.logger.Debug("Series not found in Head.Delete") continue } @@ -2066,6 +2077,17 @@ func (s sample) Type() chunkenc.ValueType { } } +func (s sample) Copy() chunks.Sample { + c := sample{t: s.t, f: s.f} + if s.h != nil { + c.h = s.h.Copy() + } + if s.fh != nil { + c.fh = s.fh.Copy() + } + return c +} + // memSeries is the in-memory representation of a series. None of its methods // are goroutine safe and it is the caller's responsibility to lock it. type memSeries struct { @@ -2090,7 +2112,7 @@ type memSeries struct { // before compaction: mmappedChunks=[p5,p6,p7,p8,p9] firstChunkID=5 // after compaction: mmappedChunks=[p7,p8,p9] firstChunkID=7 // - // pN is the pointer to the mmappedChunk referered to by HeadChunkID=N + // pN is the pointer to the mmappedChunk referred to by HeadChunkID=N mmappedChunks []*mmappedChunk // Most recent chunks in memory that are still being built or waiting to be mmapped. 
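The new `Copy` method on `sample` above deep-copies the histogram pointers rather than the struct alone. A small sketch of why that matters when a copied sample must not alias the original; the type here is a simplified stand-in, not the TSDB's internal `sample`.

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

// sample is a simplified stand-in for the TSDB's internal sample type.
type sample struct {
	t int64
	h *histogram.Histogram
}

// copyShallow copies the struct only: both samples share the same *Histogram.
func (s sample) copyShallow() sample { return s }

// copyDeep mirrors the patch's Copy(): the histogram itself is duplicated.
func (s sample) copyDeep() sample {
	c := sample{t: s.t}
	if s.h != nil {
		c.h = s.h.Copy()
	}
	return c
}

func main() {
	orig := sample{t: 100, h: &histogram.Histogram{Count: 1}}
	shallow, deep := orig.copyShallow(), orig.copyDeep()

	orig.h.Count = 42 // a later mutation of the original...
	fmt.Println(shallow.h.Count, deep.h.Count) // ...leaks into the shallow copy (42), not the deep one (1)
}
```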
// This is a linked list, headChunks points to the most recent chunk, headChunks.next points diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 988ce9397ee..ea2a163f261 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -17,11 +17,9 @@ import ( "context" "errors" "fmt" + "log/slog" "math" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -42,6 +40,12 @@ type initAppender struct { var _ storage.GetRef = &initAppender{} +func (a *initAppender) SetOptions(opts *storage.AppendOptions) { + if a.app != nil { + a.app.SetOptions(opts) + } +} + func (a *initAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { if a.app != nil { return a.app.Append(ref, lset, t, v) @@ -79,6 +83,16 @@ func (a *initAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t return a.app.AppendHistogram(ref, l, t, h, fh) } +func (a *initAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + if a.app != nil { + return a.app.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh) + } + a.head.initTime(t) + a.app = a.head.appender() + + return a.app.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh) +} + func (a *initAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { if a.app != nil { return a.app.UpdateMetadata(ref, l, m) @@ -318,11 +332,16 @@ type headAppender struct { appendID, cleanupAppendIDsBelow uint64 closed bool + hints *storage.AppendOptions +} + +func (a *headAppender) SetOptions(opts *storage.AppendOptions) { + a.hints = opts } func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { - // For OOO inserts, this restriction is irrelevant and will be checked later once we confirm the sample is an in-order append. - // If OOO inserts are disabled, we may as well as check this as early as we can and avoid more work. + // Fail fast if OOO is disabled and the sample is out of bounds. + // Otherwise a full check will be done later to decide if the sample is in-order or out-of-order. if a.oooTimeWindow == 0 && t < a.minValidTime { a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Inc() return 0, storage.ErrOutOfBounds @@ -331,29 +350,38 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) if s == nil { var err error - s, err = a.getOrCreate(lset) + s, _, err = a.getOrCreate(lset) if err != nil { return 0, err } } + s.Lock() if value.IsStaleNaN(v) { + // TODO(krajorama): reorganize Commit() to handle samples in append order + // not floats first and then histograms. Then we could do this conversion + // in commit. This code should move into Commit(). switch { case s.lastHistogramValue != nil: + s.Unlock() return a.AppendHistogram(ref, lset, t, &histogram.Histogram{Sum: v}, nil) case s.lastFloatHistogramValue != nil: + s.Unlock() return a.AppendHistogram(ref, lset, t, nil, &histogram.FloatHistogram{Sum: v}) } } - s.Lock() + defer s.Unlock() // TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise // to skip that sample from the WAL and write only in the WBL. 
- _, delta, err := s.appendable(t, v, a.headMaxt, a.minValidTime, a.oooTimeWindow) + isOOO, delta, err := s.appendable(t, v, a.headMaxt, a.minValidTime, a.oooTimeWindow) if err == nil { + if isOOO && a.hints != nil && a.hints.DiscardOutOfOrder { + a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Inc() + return 0, storage.ErrOutOfOrderSample + } s.pendingCommit = true } - s.Unlock() if delta > 0 { a.head.metrics.oooHistogram.Observe(float64(delta) / 1000) } @@ -388,13 +416,13 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 // storage.CreatedTimestampAppender.AppendCTZeroSample for further documentation. func (a *headAppender) AppendCTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, ct int64) (storage.SeriesRef, error) { if ct >= t { - return 0, fmt.Errorf("CT is newer or the same as sample's timestamp, ignoring") + return 0, storage.ErrCTNewerThanSample } s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) if s == nil { var err error - s, err = a.getOrCreate(lset) + s, _, err = a.getOrCreate(lset) if err != nil { return 0, err } @@ -424,20 +452,18 @@ func (a *headAppender) AppendCTZeroSample(ref storage.SeriesRef, lset labels.Lab return storage.SeriesRef(s.ref), nil } -func (a *headAppender) getOrCreate(lset labels.Labels) (*memSeries, error) { +func (a *headAppender) getOrCreate(lset labels.Labels) (s *memSeries, created bool, err error) { // Ensure no empty labels have gotten through. lset = lset.WithoutEmpty() if lset.IsEmpty() { - return nil, fmt.Errorf("empty labelset: %w", ErrInvalidSample) + return nil, false, fmt.Errorf("empty labelset: %w", ErrInvalidSample) } if l, dup := lset.HasDuplicateLabelNames(); dup { - return nil, fmt.Errorf(`label name "%s" is not unique: %w`, l, ErrInvalidSample) + return nil, false, fmt.Errorf(`label name "%s" is not unique: %w`, l, ErrInvalidSample) } - var created bool - var err error - s, created, err := a.head.getOrCreate(lset.Hash(), lset) + s, created, err = a.head.getOrCreate(lset.Hash(), lset) if err != nil { - return nil, err + return nil, false, err } if created { a.series = append(a.series, record.RefSeries{ @@ -445,12 +471,13 @@ func (a *headAppender) getOrCreate(lset labels.Labels) (*memSeries, error) { Labels: lset, }) } - return s, nil + return s, created, nil } -// appendable checks whether the given sample is valid for appending to the series. (if we return false and no error) -// The sample belongs to the out of order chunk if we return true and no error. -// An error signifies the sample cannot be handled. +// appendable checks whether the given sample is valid for appending to the series. +// If the sample is valid and in-order, it returns false with no error. +// If the sample belongs to the out-of-order chunk, it returns true with no error. +// If the sample cannot be handled, it returns an error. func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTimeWindow int64) (isOOO bool, oooDelta int64, err error) { // Check if we can append in the in-order chunk. if t >= minValidTime { @@ -493,46 +520,94 @@ func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTi return false, headMaxt - t, storage.ErrOutOfOrderSample } -// appendableHistogram checks whether the given histogram is valid for appending to the series. 
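The new `SetOptions` hook and the `DiscardOutOfOrder` check above let a caller ask the appender to reject out-of-order float samples instead of routing them into the OOO head. A minimal sketch of how a client might use it, assuming the `storage.Appender` interface exposes the new `SetOptions` method that the appenders in this patch implement; labels, timestamps, and the data directory are made up.

```go
package main

import (
	"context"
	"log"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	opts := tsdb.DefaultOptions()
	opts.OutOfOrderTimeWindow = 60_000 // OOO ingestion enabled in general...

	db, err := tsdb.Open("data/", nil, nil, opts, nil) // path is illustrative
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	app := db.Appender(context.Background())
	// ...but this particular appender asks for OOO floats to be rejected.
	app.SetOptions(&storage.AppendOptions{DiscardOutOfOrder: true})

	lbls := labels.FromStrings("__name__", "demo_metric")
	if _, err := app.Append(0, lbls, 1000, 1.0); err != nil {
		log.Println("append failed:", err) // an OOO sample surfaces storage.ErrOutOfOrderSample here
	}
	if err := app.Commit(); err != nil {
		log.Fatal(err)
	}
}
```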
-func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram) error { - if s.headChunks == nil { - return nil +// appendableHistogram checks whether the given histogram sample is valid for appending to the series. (if we return false and no error) +// The sample belongs to the out of order chunk if we return true and no error. +// An error signifies the sample cannot be handled. +func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram, headMaxt, minValidTime, oooTimeWindow int64, oooHistogramsEnabled bool) (isOOO bool, oooDelta int64, err error) { + // Check if we can append in the in-order chunk. + if t >= minValidTime { + if s.headChunks == nil { + // The series has no sample and was freshly created. + return false, 0, nil + } + msMaxt := s.maxTime() + if t > msMaxt { + return false, 0, nil + } + if t == msMaxt { + // We are allowing exact duplicates as we can encounter them in valid cases + // like federation and erroring out at that time would be extremely noisy. + // This only checks against the latest in-order sample. + // The OOO headchunk has its own method to detect these duplicates. + if !h.Equals(s.lastHistogramValue) { + return false, 0, storage.ErrDuplicateSampleForTimestamp + } + // Sample is identical (ts + value) with most current (highest ts) sample in sampleBuf. + return false, 0, nil + } } - if t > s.headChunks.maxTime { - return nil - } - if t < s.headChunks.maxTime { - return storage.ErrOutOfOrderSample + // The sample cannot go in the in-order chunk. Check if it can go in the out-of-order chunk. + if oooTimeWindow > 0 && t >= headMaxt-oooTimeWindow { + if !oooHistogramsEnabled { + return true, headMaxt - t, storage.ErrOOONativeHistogramsDisabled + } + return true, headMaxt - t, nil } - // We are allowing exact duplicates as we can encounter them in valid cases - // like federation and erroring out at that time would be extremely noisy. - if !h.Equals(s.lastHistogramValue) { - return storage.ErrDuplicateSampleForTimestamp + // The sample cannot go in both in-order and out-of-order chunk. + if oooTimeWindow > 0 { + return true, headMaxt - t, storage.ErrTooOldSample } - return nil + if t < minValidTime { + return false, headMaxt - t, storage.ErrOutOfBounds + } + return false, headMaxt - t, storage.ErrOutOfOrderSample } -// appendableFloatHistogram checks whether the given float histogram is valid for appending to the series. -func (s *memSeries) appendableFloatHistogram(t int64, fh *histogram.FloatHistogram) error { - if s.headChunks == nil { - return nil +// appendableFloatHistogram checks whether the given float histogram sample is valid for appending to the series. (if we return false and no error) +// The sample belongs to the out of order chunk if we return true and no error. +// An error signifies the sample cannot be handled. +func (s *memSeries) appendableFloatHistogram(t int64, fh *histogram.FloatHistogram, headMaxt, minValidTime, oooTimeWindow int64, oooHistogramsEnabled bool) (isOOO bool, oooDelta int64, err error) { + // Check if we can append in the in-order chunk. + if t >= minValidTime { + if s.headChunks == nil { + // The series has no sample and was freshly created. + return false, 0, nil + } + msMaxt := s.maxTime() + if t > msMaxt { + return false, 0, nil + } + if t == msMaxt { + // We are allowing exact duplicates as we can encounter them in valid cases + // like federation and erroring out at that time would be extremely noisy. + // This only checks against the latest in-order sample. 
+ // The OOO headchunk has its own method to detect these duplicates. + if !fh.Equals(s.lastFloatHistogramValue) { + return false, 0, storage.ErrDuplicateSampleForTimestamp + } + // Sample is identical (ts + value) with most current (highest ts) sample in sampleBuf. + return false, 0, nil + } } - if t > s.headChunks.maxTime { - return nil - } - if t < s.headChunks.maxTime { - return storage.ErrOutOfOrderSample + // The sample cannot go in the in-order chunk. Check if it can go in the out-of-order chunk. + if oooTimeWindow > 0 && t >= headMaxt-oooTimeWindow { + if !oooHistogramsEnabled { + return true, headMaxt - t, storage.ErrOOONativeHistogramsDisabled + } + return true, headMaxt - t, nil } - // We are allowing exact duplicates as we can encounter them in valid cases - // like federation and erroring out at that time would be extremely noisy. - if !fh.Equals(s.lastFloatHistogramValue) { - return storage.ErrDuplicateSampleForTimestamp + // The sample cannot go in both in-order and out-of-order chunk. + if oooTimeWindow > 0 { + return true, headMaxt - t, storage.ErrTooOldSample } - return nil + if t < minValidTime { + return false, headMaxt - t, storage.ErrOutOfBounds + } + return false, headMaxt - t, storage.ErrOutOfOrderSample } // AppendExemplar for headAppender assumes the series ref already exists, and so it doesn't @@ -577,7 +652,9 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels return 0, storage.ErrNativeHistogramsDisabled } - if t < a.minValidTime { + // Fail fast if OOO is disabled and the sample is out of bounds. + // Otherwise a full check will be done later to decide if the sample is in-order or out-of-order. + if (a.oooTimeWindow == 0 || !a.head.opts.EnableOOONativeHistograms.Load()) && t < a.minValidTime { a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeHistogram).Inc() return 0, storage.ErrOutOfBounds } @@ -594,50 +671,48 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels } } + var created bool s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) if s == nil { - // Ensure no empty labels have gotten through. - lset = lset.WithoutEmpty() - if lset.IsEmpty() { - return 0, fmt.Errorf("empty labelset: %w", ErrInvalidSample) - } - - if l, dup := lset.HasDuplicateLabelNames(); dup { - return 0, fmt.Errorf(`label name "%s" is not unique: %w`, l, ErrInvalidSample) - } - - var created bool var err error - s, created, err = a.head.getOrCreate(lset.Hash(), lset) + s, created, err = a.getOrCreate(lset) if err != nil { return 0, err } - if created { - switch { - case h != nil: - s.lastHistogramValue = &histogram.Histogram{} - case fh != nil: - s.lastFloatHistogramValue = &histogram.FloatHistogram{} - } - a.series = append(a.series, record.RefSeries{ - Ref: s.ref, - Labels: lset, - }) - } } switch { case h != nil: s.Lock() - if err := s.appendableHistogram(t, h); err != nil { - s.Unlock() - if errors.Is(err, storage.ErrOutOfOrderSample) { + + // TODO(krajorama): reorganize Commit() to handle samples in append order + // not floats first and then histograms. Then we would not need to do this. + // This whole "if" should be removed. + if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil { + s.lastHistogramValue = &histogram.Histogram{} + } + + // TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise + // to skip that sample from the WAL and write only in the WBL. 
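As a worked example of the decision logic in `appendableHistogram`/`appendableFloatHistogram` above: with a head max time of 1000, an out-of-order window of 300 and a min valid time of 0, different sample timestamps land in different buckets. The sketch below restates those outcomes in simplified form, conflating the series' own max time with the head max time and leaving out the equal-timestamp duplicate case; it is not the production code path.

```go
package main

import "fmt"

// classify restates the outcomes of the checks above for a series that
// already has in-order data.
func classify(t, headMaxt, minValidTime, oooWindow int64, oooHistogramsEnabled bool) string {
	switch {
	case t > headMaxt:
		return "in-order append"
	case oooWindow > 0 && t >= headMaxt-oooWindow && !oooHistogramsEnabled:
		return "rejected: ErrOOONativeHistogramsDisabled"
	case oooWindow > 0 && t >= headMaxt-oooWindow:
		return "accepted into the out-of-order head"
	case oooWindow > 0:
		return "rejected: ErrTooOldSample"
	case t < minValidTime:
		return "rejected: ErrOutOfBounds"
	default:
		return "rejected: ErrOutOfOrderSample"
	}
}

func main() {
	const headMaxt, minValidTime, oooWindow = 1000, 0, 300
	for _, ts := range []int64{1200, 900, 700, 650, -10} {
		fmt.Printf("t=%5d -> %s\n", ts, classify(ts, headMaxt, minValidTime, oooWindow, true))
	}
	fmt.Println("t=  900 with OOO histograms disabled ->", classify(900, headMaxt, minValidTime, oooWindow, false))
}
```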
+ _, delta, err := s.appendableHistogram(t, h, a.headMaxt, a.minValidTime, a.oooTimeWindow, a.head.opts.EnableOOONativeHistograms.Load()) + if err == nil { + s.pendingCommit = true + } + s.Unlock() + if delta > 0 { + a.head.metrics.oooHistogram.Observe(float64(delta) / 1000) + } + if err != nil { + switch { + case errors.Is(err, storage.ErrOutOfOrderSample): + fallthrough + case errors.Is(err, storage.ErrOOONativeHistogramsDisabled): a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Inc() + case errors.Is(err, storage.ErrTooOldSample): + a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeHistogram).Inc() } return 0, err } - s.pendingCommit = true - s.Unlock() a.histograms = append(a.histograms, record.RefHistogramSample{ Ref: s.ref, T: t, @@ -646,15 +721,35 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels a.histogramSeries = append(a.histogramSeries, s) case fh != nil: s.Lock() + + // TODO(krajorama): reorganize Commit() to handle samples in append order + // not floats first and then histograms. Then we would not need to do this. + // This whole "if" should be removed. + if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil { + s.lastFloatHistogramValue = &histogram.FloatHistogram{} + } + + // TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise + // to skip that sample from the WAL and write only in the WBL. + _, delta, err := s.appendableFloatHistogram(t, fh, a.headMaxt, a.minValidTime, a.oooTimeWindow, a.head.opts.EnableOOONativeHistograms.Load()) + if err == nil { + s.pendingCommit = true + } + s.Unlock() + if delta > 0 { + a.head.metrics.oooHistogram.Observe(float64(delta) / 1000) + } + if err != nil { + switch { + case errors.Is(err, storage.ErrOutOfOrderSample): + fallthrough + case errors.Is(err, storage.ErrOOONativeHistogramsDisabled): a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Inc() + case errors.Is(err, storage.ErrTooOldSample): + a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeHistogram).Inc() } return 0, err } - s.pendingCommit = true - s.Unlock() a.floatHistograms = append(a.floatHistograms, record.RefFloatHistogramSample{ Ref: s.ref, T: t, @@ -673,6 +768,102 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels return storage.SeriesRef(s.ref), nil } +func (a *headAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + if !a.head.opts.EnableNativeHistograms.Load() { + return 0, storage.ErrNativeHistogramsDisabled + } + + if ct >= t { + return 0, storage.ErrCTNewerThanSample + } + + var created bool + s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) + if s == nil { + var err error + s, created, err = a.getOrCreate(lset) + if err != nil { + return 0, err + } + } + + switch { + case h != nil: + zeroHistogram := &histogram.Histogram{} + s.Lock() + + // TODO(krajorama): reorganize Commit() to handle samples in append order + // not floats first and then histograms. Then we would not need to do this. + // This whole "if" should be removed.
+ if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil { + s.lastHistogramValue = zeroHistogram + } + + // Although we call `appendableHistogram` with oooHistogramsEnabled=true, for CTZeroSamples OOO is not allowed. + // We set it to true to make this implementation as close as possible to the float implementation. + isOOO, _, err := s.appendableHistogram(ct, zeroHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow, true) + if err != nil { + s.Unlock() + if errors.Is(err, storage.ErrOutOfOrderSample) { + return 0, storage.ErrOutOfOrderCT + } + } + // OOO is not allowed because after the first scrape, CT will be the same for most (if not all) future samples. + // This is to prevent the injected zero from being marked as OOO forever. + if isOOO { + s.Unlock() + return 0, storage.ErrOutOfOrderCT + } + s.pendingCommit = true + s.Unlock() + a.histograms = append(a.histograms, record.RefHistogramSample{ + Ref: s.ref, + T: ct, + H: zeroHistogram, + }) + a.histogramSeries = append(a.histogramSeries, s) + case fh != nil: + zeroFloatHistogram := &histogram.FloatHistogram{} + s.Lock() + + // TODO(krajorama): reorganize Commit() to handle samples in append order + // not floats first and then histograms. Then we would not need to do this. + // This whole "if" should be removed. + if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil { + s.lastFloatHistogramValue = zeroFloatHistogram + } + + // Although we call `appendableFloatHistogram` with oooHistogramsEnabled=true, for CTZeroSamples OOO is not allowed. + // We set it to true to make this implementation as close as possible to the float implementation. + isOOO, _, err := s.appendableFloatHistogram(ct, zeroFloatHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow, true) // OOO is not allowed for CTZeroSamples. + if err != nil { + s.Unlock() + if errors.Is(err, storage.ErrOutOfOrderSample) { + return 0, storage.ErrOutOfOrderCT + } + } + // OOO is not allowed because after the first scrape, CT will be the same for most (if not all) future samples. + // This is to prevent the injected zero from being marked as OOO forever. + if isOOO { + s.Unlock() + return 0, storage.ErrOutOfOrderCT + } + s.pendingCommit = true + s.Unlock() + a.floatHistograms = append(a.floatHistograms, record.RefFloatHistogramSample{ + Ref: s.ref, + T: ct, + FH: zeroFloatHistogram, + }) + a.floatHistogramSeries = append(a.floatHistogramSeries, s) + } + + if ct > a.maxt { + a.maxt = ct + } + return storage.SeriesRef(s.ref), nil +} + // UpdateMetadata for headAppender assumes the series ref already exists, and so it doesn't // use getOrCreate or make any of the lset sanity checks that Append does. func (a *headAppender) UpdateMetadata(ref storage.SeriesRef, lset labels.Labels, meta metadata.Metadata) (storage.SeriesRef, error) { @@ -793,23 +984,38 @@ func exemplarsForEncoding(es []exemplarWithSeriesRef) []record.RefExemplar { return ret } -// Commit writes to the WAL and adds the data to the Head. -// TODO(codesome): Refactor this method to reduce indentation and make it more readable. -func (a *headAppender) Commit() (err error) { - if a.closed { - return ErrAppenderClosed - } - defer func() { a.closed = true }() - - if err := a.log(); err != nil { - _ = a.Rollback() // Most likely the same error will happen again. 
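To illustrate the new `AppendHistogramCTZeroSample` path: a caller that knows a histogram's created timestamp first injects a synthetic zero histogram at `ct` and then appends the real sample at `t`; `ct >= t`, or a zero sample that would land out of order, is reported as `storage.ErrOutOfOrderCT` and is normally ignored after the first scrape. The sketch below uses a minimal local interface built from the method signatures in this patch; the helper name and the interface are illustrative.

```go
package main

import (
	"errors"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
)

// histogramCTAppender is the minimal surface this sketch needs; the head
// appender in this patch satisfies it.
type histogramCTAppender interface {
	AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error)
	AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error)
}

// appendWithCreatedTimestamp sketches the intended call order: inject a zero
// histogram at the created timestamp ct, then append the real sample at t.
// ErrOutOfOrderCT is expected and ignored once the series already has data.
func appendWithCreatedTimestamp(app histogramCTAppender, lbls labels.Labels, t, ct int64, h *histogram.Histogram) error {
	ref, err := app.AppendHistogramCTZeroSample(0, lbls, t, ct, h, nil)
	if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) {
		return err
	}
	_, err = app.AppendHistogram(ref, lbls, t, h, nil)
	return err
}

func main() {} // the helper above is the point of this sketch
```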
- return fmt.Errorf("write to WAL: %w", err) - } - - if a.head.writeNotified != nil { - a.head.writeNotified.Notify() - } +type appenderCommitContext struct { + floatsAppended int + histogramsAppended int + // Number of samples out of order but accepted: with ooo enabled and within time window. + oooFloatsAccepted int + oooHistogramAccepted int + // Number of samples rejected due to: out of order but OOO support disabled. + floatOOORejected int + histoOOORejected int + // Number of samples rejected due to: out of order but too old (OOO support enabled, but outside time window). + floatTooOldRejected int + histoTooOldRejected int + // Number of samples rejected due to: out of bounds: with t < minValidTime (OOO support disabled). + floatOOBRejected int + histoOOBRejected int + inOrderMint int64 + inOrderMaxt int64 + oooMinT int64 + oooMaxT int64 + wblSamples []record.RefSample + wblHistograms []record.RefHistogramSample + wblFloatHistograms []record.RefFloatHistogramSample + oooMmapMarkers map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef + oooMmapMarkersCount int + oooRecords [][]byte + oooCapMax int64 + appendChunkOpts chunkOpts + enc record.Encoder +} +// commitExemplars adds all exemplars from headAppender to the head's exemplar storage. +func (a *headAppender) commitExemplars() { // No errors logging to WAL, so pass the exemplars along to the in memory storage. for _, e := range a.exemplars { s := a.head.series.getByID(chunks.HeadSeriesRef(e.ref)) @@ -824,112 +1030,117 @@ func (a *headAppender) Commit() (err error) { if errors.Is(err, storage.ErrOutOfOrderExemplar) { continue } - level.Debug(a.head.logger).Log("msg", "Unknown error while adding exemplar", "err", err) + a.head.logger.Debug("Unknown error while adding exemplar", "err", err) } } +} - defer a.head.metrics.activeAppenders.Dec() - defer a.head.putAppendBuffer(a.samples) - defer a.head.putSeriesBuffer(a.sampleSeries) - defer a.head.putExemplarBuffer(a.exemplars) - defer a.head.putHistogramBuffer(a.histograms) - defer a.head.putFloatHistogramBuffer(a.floatHistograms) - defer a.head.putMetadataBuffer(a.metadata) - defer a.head.iso.closeAppend(a.appendID) +func (acc *appenderCommitContext) collectOOORecords(a *headAppender) { + if a.head.wbl == nil { + // WBL is not enabled. So no need to collect. + acc.wblSamples = nil + acc.wblHistograms = nil + acc.wblFloatHistograms = nil + acc.oooMmapMarkers = nil + acc.oooMmapMarkersCount = 0 + return + } - var ( - floatsAppended = len(a.samples) - histogramsAppended = len(a.histograms) + len(a.floatHistograms) - // number of samples out of order but accepted: with ooo enabled and within time window - oooFloatsAccepted int - // number of samples rejected due to: out of order but OOO support disabled. 
- floatOOORejected int - histoOOORejected int - // number of samples rejected due to: that are out of order but too old (OOO support enabled, but outside time window) - floatTooOldRejected int - // number of samples rejected due to: out of bounds: with t < minValidTime (OOO support disabled) - floatOOBRejected int - - inOrderMint int64 = math.MaxInt64 - inOrderMaxt int64 = math.MinInt64 - oooMinT int64 = math.MaxInt64 - oooMaxT int64 = math.MinInt64 - wblSamples []record.RefSample - oooMmapMarkers map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef - oooMmapMarkersCount int - oooRecords [][]byte - oooCapMax = a.head.opts.OutOfOrderCapMax.Load() - series *memSeries - appendChunkOpts = chunkOpts{ - chunkDiskMapper: a.head.chunkDiskMapper, - chunkRange: a.head.chunkRange.Load(), - samplesPerChunk: a.head.opts.SamplesPerChunk, - } - enc record.Encoder - ) - defer func() { - for i := range oooRecords { - a.head.putBytesBuffer(oooRecords[i][:0]) - } - }() - collectOOORecords := func() { - if a.head.wbl == nil { - // WBL is not enabled. So no need to collect. - wblSamples = nil - oooMmapMarkers = nil - oooMmapMarkersCount = 0 - return - } - // The m-map happens before adding a new sample. So we collect - // the m-map markers first, and then samples. - // WBL Graphically: - // WBL Before this Commit(): [old samples before this commit for chunk 1] - // WBL After this Commit(): [old samples before this commit for chunk 1][new samples in this commit for chunk 1]mmapmarker1[samples for chunk 2]mmapmarker2[samples for chunk 3] - if oooMmapMarkers != nil { - markers := make([]record.RefMmapMarker, 0, oooMmapMarkersCount) - for ref, mmapRefs := range oooMmapMarkers { - for _, mmapRef := range mmapRefs { - markers = append(markers, record.RefMmapMarker{ - Ref: ref, - MmapRef: mmapRef, - }) - } + // The m-map happens before adding a new sample. So we collect + // the m-map markers first, and then samples. 
+ // WBL Graphically: + // WBL Before this Commit(): [old samples before this commit for chunk 1] + // WBL After this Commit(): [old samples before this commit for chunk 1][new samples in this commit for chunk 1]mmapmarker1[samples for chunk 2]mmapmarker2[samples for chunk 3] + if acc.oooMmapMarkers != nil { + markers := make([]record.RefMmapMarker, 0, acc.oooMmapMarkersCount) + for ref, mmapRefs := range acc.oooMmapMarkers { + for _, mmapRef := range mmapRefs { + markers = append(markers, record.RefMmapMarker{ + Ref: ref, + MmapRef: mmapRef, + }) } - r := enc.MmapMarkers(markers, a.head.getBytesBuffer()) - oooRecords = append(oooRecords, r) } + r := acc.enc.MmapMarkers(markers, a.head.getBytesBuffer()) + acc.oooRecords = append(acc.oooRecords, r) + } - if len(wblSamples) > 0 { - r := enc.Samples(wblSamples, a.head.getBytesBuffer()) - oooRecords = append(oooRecords, r) - } + if len(acc.wblSamples) > 0 { + r := acc.enc.Samples(acc.wblSamples, a.head.getBytesBuffer()) + acc.oooRecords = append(acc.oooRecords, r) + } + if len(acc.wblHistograms) > 0 { + r := acc.enc.HistogramSamples(acc.wblHistograms, a.head.getBytesBuffer()) + acc.oooRecords = append(acc.oooRecords, r) + } + if len(acc.wblFloatHistograms) > 0 { + r := acc.enc.FloatHistogramSamples(acc.wblFloatHistograms, a.head.getBytesBuffer()) + acc.oooRecords = append(acc.oooRecords, r) + } + + acc.wblSamples = nil + acc.wblHistograms = nil + acc.wblFloatHistograms = nil + acc.oooMmapMarkers = nil +} - wblSamples = nil - oooMmapMarkers = nil +// handleAppendableError processes errors encountered during sample appending and updates +// the provided counters accordingly. +// +// Parameters: +// - err: The error encountered during appending. +// - appended: Pointer to the counter tracking the number of successfully appended samples. +// - oooRejected: Pointer to the counter tracking the number of out-of-order samples rejected. +// - oobRejected: Pointer to the counter tracking the number of out-of-bounds samples rejected. +// - tooOldRejected: Pointer to the counter tracking the number of too-old samples rejected. +func handleAppendableError(err error, appended, oooRejected, oobRejected, tooOldRejected *int) { + switch { + case errors.Is(err, storage.ErrOutOfOrderSample): + *appended-- + *oooRejected++ + case errors.Is(err, storage.ErrOutOfBounds): + *appended-- + *oobRejected++ + case errors.Is(err, storage.ErrTooOldSample): + *appended-- + *tooOldRejected++ + default: + *appended-- } +} + +// commitSamples processes and commits the samples in the headAppender to the series. +// It handles both in-order and out-of-order samples, updating the appenderCommitContext +// with the results of the append operations. +// +// The function iterates over the samples in the headAppender and attempts to append each sample +// to its corresponding series. It handles various error cases such as out-of-order samples, +// out-of-bounds samples, and too-old samples, updating the appenderCommitContext accordingly. +// +// For out-of-order samples, it checks if the sample can be inserted into the series and updates +// the out-of-order mmap markers if necessary. It also updates the write-ahead log (WBL) samples +// and the minimum and maximum timestamps for out-of-order samples. +// +// For in-order samples, it attempts to append the sample to the series and updates the minimum +// and maximum timestamps for in-order samples. 
+// +// The function also increments the chunk metrics if a new chunk is created and performs cleanup +// operations on the series after appending the samples. +// +// There are also specific functions to commit histograms and float histograms. +func (a *headAppender) commitSamples(acc *appenderCommitContext) { + var ok, chunkCreated bool + var series *memSeries + for i, s := range a.samples { series = a.sampleSeries[i] series.Lock() oooSample, _, err := series.appendable(s.T, s.V, a.headMaxt, a.minValidTime, a.oooTimeWindow) - switch { - case err == nil: - // Do nothing. - case errors.Is(err, storage.ErrOutOfOrderSample): - floatsAppended-- - floatOOORejected++ - case errors.Is(err, storage.ErrOutOfBounds): - floatsAppended-- - floatOOBRejected++ - case errors.Is(err, storage.ErrTooOldSample): - floatsAppended-- - floatTooOldRejected++ - default: - floatsAppended-- + if err != nil { + handleAppendableError(err, &acc.floatsAppended, &acc.floatOOORejected, &acc.floatOOBRejected, &acc.floatTooOldRejected) } - var ok, chunkCreated bool - switch { case err != nil: // Do nothing here. @@ -937,9 +1148,9 @@ func (a *headAppender) Commit() (err error) { // Sample is OOO and OOO handling is enabled // and the delta is within the OOO tolerance. var mmapRefs []chunks.ChunkDiskMapperRef - ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, nil, nil, a.head.chunkDiskMapper, oooCapMax, a.head.logger) + ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, nil, nil, a.head.chunkDiskMapper, acc.oooCapMax, a.head.logger) if chunkCreated { - r, ok := oooMmapMarkers[series.ref] + r, ok := acc.oooMmapMarkers[series.ref] if !ok || r != nil { // !ok means there are no markers collected for these samples yet. So we first flush the samples // before setting this m-map marker. @@ -947,49 +1158,49 @@ func (a *headAppender) Commit() (err error) { // r != nil means we have already m-mapped a chunk for this series in the same Commit(). // Hence, before we m-map again, we should add the samples and m-map markers // seen till now to the WBL records. - collectOOORecords() + acc.collectOOORecords(a) } - if oooMmapMarkers == nil { - oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef) + if acc.oooMmapMarkers == nil { + acc.oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef) } if len(mmapRefs) > 0 { - oooMmapMarkers[series.ref] = mmapRefs - oooMmapMarkersCount += len(mmapRefs) + acc.oooMmapMarkers[series.ref] = mmapRefs + acc.oooMmapMarkersCount += len(mmapRefs) } else { // No chunk was written to disk, so we need to set an initial marker for this series. - oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0} - oooMmapMarkersCount++ + acc.oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0} + acc.oooMmapMarkersCount++ } } if ok { - wblSamples = append(wblSamples, s) - if s.T < oooMinT { - oooMinT = s.T + acc.wblSamples = append(acc.wblSamples, s) + if s.T < acc.oooMinT { + acc.oooMinT = s.T } - if s.T > oooMaxT { - oooMaxT = s.T + if s.T > acc.oooMaxT { + acc.oooMaxT = s.T } - oooFloatsAccepted++ + acc.oooFloatsAccepted++ } else { // Sample is an exact duplicate of the last sample. // NOTE: We can only detect updates if they clash with a sample in the OOOHeadChunk, // not with samples in already flushed OOO chunks. // TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305. 
- floatsAppended-- + acc.floatsAppended-- } default: - ok, chunkCreated = series.append(s.T, s.V, a.appendID, appendChunkOpts) + ok, chunkCreated = series.append(s.T, s.V, a.appendID, acc.appendChunkOpts) if ok { - if s.T < inOrderMint { - inOrderMint = s.T + if s.T < acc.inOrderMint { + acc.inOrderMint = s.T } - if s.T > inOrderMaxt { - inOrderMaxt = s.T + if s.T > acc.inOrderMaxt { + acc.inOrderMaxt = s.T } } else { // The sample is an exact duplicate, and should be silently dropped. - floatsAppended-- + acc.floatsAppended-- } } @@ -1002,89 +1213,278 @@ func (a *headAppender) Commit() (err error) { series.pendingCommit = false series.Unlock() } +} + +// For details on the commitHistograms function, see the commitSamples docs. +func (a *headAppender) commitHistograms(acc *appenderCommitContext) { + var ok, chunkCreated bool + var series *memSeries for i, s := range a.histograms { series = a.histogramSeries[i] series.Lock() - ok, chunkCreated := series.appendHistogram(s.T, s.H, a.appendID, appendChunkOpts) - series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) - series.pendingCommit = false - series.Unlock() - if ok { - if s.T < inOrderMint { - inOrderMint = s.T + oooSample, _, err := series.appendableHistogram(s.T, s.H, a.headMaxt, a.minValidTime, a.oooTimeWindow, a.head.opts.EnableOOONativeHistograms.Load()) + if err != nil { + handleAppendableError(err, &acc.histogramsAppended, &acc.histoOOORejected, &acc.histoOOBRejected, &acc.histoTooOldRejected) + } + + switch { + case err != nil: + // Do nothing here. + case oooSample: + // Sample is OOO and OOO handling is enabled + // and the delta is within the OOO tolerance. + var mmapRefs []chunks.ChunkDiskMapperRef + ok, chunkCreated, mmapRefs = series.insert(s.T, 0, s.H, nil, a.head.chunkDiskMapper, acc.oooCapMax, a.head.logger) + if chunkCreated { + r, ok := acc.oooMmapMarkers[series.ref] + if !ok || r != nil { + // !ok means there are no markers collected for these samples yet. So we first flush the samples + // before setting this m-map marker. + + // r != 0 means we have already m-mapped a chunk for this series in the same Commit(). + // Hence, before we m-map again, we should add the samples and m-map markers + // seen till now to the WBL records. + acc.collectOOORecords(a) + } + + if acc.oooMmapMarkers == nil { + acc.oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef) + } + if len(mmapRefs) > 0 { + acc.oooMmapMarkers[series.ref] = mmapRefs + acc.oooMmapMarkersCount += len(mmapRefs) + } else { + // No chunk was written to disk, so we need to set an initial marker for this series. + acc.oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0} + acc.oooMmapMarkersCount++ + } + } + if ok { + acc.wblHistograms = append(acc.wblHistograms, s) + if s.T < acc.oooMinT { + acc.oooMinT = s.T + } + if s.T > acc.oooMaxT { + acc.oooMaxT = s.T + } + acc.oooHistogramAccepted++ + } else { + // Sample is an exact duplicate of the last sample. + // NOTE: We can only detect updates if they clash with a sample in the OOOHeadChunk, + // not with samples in already flushed OOO chunks. + // TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305. 
+ acc.histogramsAppended-- } - if s.T > inOrderMaxt { - inOrderMaxt = s.T + default: + ok, chunkCreated = series.appendHistogram(s.T, s.H, a.appendID, acc.appendChunkOpts) + if ok { + if s.T < acc.inOrderMint { + acc.inOrderMint = s.T + } + if s.T > acc.inOrderMaxt { + acc.inOrderMaxt = s.T + } + } else { + acc.histogramsAppended-- + acc.histoOOORejected++ } - } else { - histogramsAppended-- - histoOOORejected++ } + if chunkCreated { a.head.metrics.chunks.Inc() a.head.metrics.chunksCreated.Inc() } + + series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) + series.pendingCommit = false + series.Unlock() } +} + +// For details on the commitFloatHistograms function, see the commitSamples docs. +func (a *headAppender) commitFloatHistograms(acc *appenderCommitContext) { + var ok, chunkCreated bool + var series *memSeries for i, s := range a.floatHistograms { series = a.floatHistogramSeries[i] series.Lock() - ok, chunkCreated := series.appendFloatHistogram(s.T, s.FH, a.appendID, appendChunkOpts) - series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) - series.pendingCommit = false - series.Unlock() - if ok { - if s.T < inOrderMint { - inOrderMint = s.T + oooSample, _, err := series.appendableFloatHistogram(s.T, s.FH, a.headMaxt, a.minValidTime, a.oooTimeWindow, a.head.opts.EnableOOONativeHistograms.Load()) + if err != nil { + handleAppendableError(err, &acc.histogramsAppended, &acc.histoOOORejected, &acc.histoOOBRejected, &acc.histoTooOldRejected) + } + + switch { + case err != nil: + // Do nothing here. + case oooSample: + // Sample is OOO and OOO handling is enabled + // and the delta is within the OOO tolerance. + var mmapRefs []chunks.ChunkDiskMapperRef + ok, chunkCreated, mmapRefs = series.insert(s.T, 0, nil, s.FH, a.head.chunkDiskMapper, acc.oooCapMax, a.head.logger) + if chunkCreated { + r, ok := acc.oooMmapMarkers[series.ref] + if !ok || r != nil { + // !ok means there are no markers collected for these samples yet. So we first flush the samples + // before setting this m-map marker. + + // r != 0 means we have already m-mapped a chunk for this series in the same Commit(). + // Hence, before we m-map again, we should add the samples and m-map markers + // seen till now to the WBL records. + acc.collectOOORecords(a) + } + + if acc.oooMmapMarkers == nil { + acc.oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef) + } + if len(mmapRefs) > 0 { + acc.oooMmapMarkers[series.ref] = mmapRefs + acc.oooMmapMarkersCount += len(mmapRefs) + } else { + // No chunk was written to disk, so we need to set an initial marker for this series. + acc.oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0} + acc.oooMmapMarkersCount++ + } } - if s.T > inOrderMaxt { - inOrderMaxt = s.T + if ok { + acc.wblFloatHistograms = append(acc.wblFloatHistograms, s) + if s.T < acc.oooMinT { + acc.oooMinT = s.T + } + if s.T > acc.oooMaxT { + acc.oooMaxT = s.T + } + acc.oooHistogramAccepted++ + } else { + // Sample is an exact duplicate of the last sample. + // NOTE: We can only detect updates if they clash with a sample in the OOOHeadChunk, + // not with samples in already flushed OOO chunks. + // TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305. 
+ acc.histogramsAppended-- + } + default: + ok, chunkCreated = series.appendFloatHistogram(s.T, s.FH, a.appendID, acc.appendChunkOpts) + if ok { + if s.T < acc.inOrderMint { + acc.inOrderMint = s.T + } + if s.T > acc.inOrderMaxt { + acc.inOrderMaxt = s.T + } + } else { + acc.histogramsAppended-- + acc.histoOOORejected++ } - } else { - histogramsAppended-- - histoOOORejected++ } + if chunkCreated { a.head.metrics.chunks.Inc() a.head.metrics.chunksCreated.Inc() } + + series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) + series.pendingCommit = false + series.Unlock() } +} +// commitMetadata commits the metadata for each series in the headAppender. +// It iterates over the metadata slice and updates the corresponding series +// with the new metadata information. The series is locked during the update +// to ensure thread safety. +func (a *headAppender) commitMetadata() { + var series *memSeries for i, m := range a.metadata { series = a.metadataSeries[i] series.Lock() series.meta = &metadata.Metadata{Type: record.ToMetricType(m.Type), Unit: m.Unit, Help: m.Help} series.Unlock() } +} + +// Commit writes to the WAL and adds the data to the Head. +// TODO(codesome): Refactor this method to reduce indentation and make it more readable. +func (a *headAppender) Commit() (err error) { + if a.closed { + return ErrAppenderClosed + } + defer func() { a.closed = true }() + + if err := a.log(); err != nil { + _ = a.Rollback() // Most likely the same error will happen again. + return fmt.Errorf("write to WAL: %w", err) + } + + if a.head.writeNotified != nil { + a.head.writeNotified.Notify() + } + + a.commitExemplars() + + defer a.head.metrics.activeAppenders.Dec() + defer a.head.putAppendBuffer(a.samples) + defer a.head.putSeriesBuffer(a.sampleSeries) + defer a.head.putExemplarBuffer(a.exemplars) + defer a.head.putHistogramBuffer(a.histograms) + defer a.head.putFloatHistogramBuffer(a.floatHistograms) + defer a.head.putMetadataBuffer(a.metadata) + defer a.head.iso.closeAppend(a.appendID) + + acc := &appenderCommitContext{ + floatsAppended: len(a.samples), + histogramsAppended: len(a.histograms) + len(a.floatHistograms), + inOrderMint: math.MaxInt64, + inOrderMaxt: math.MinInt64, + oooMinT: math.MaxInt64, + oooMaxT: math.MinInt64, + oooCapMax: a.head.opts.OutOfOrderCapMax.Load(), + appendChunkOpts: chunkOpts{ + chunkDiskMapper: a.head.chunkDiskMapper, + chunkRange: a.head.chunkRange.Load(), + samplesPerChunk: a.head.opts.SamplesPerChunk, + }, + } - a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatOOORejected)) - a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histoOOORejected)) - a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatOOBRejected)) - a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatTooOldRejected)) - a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatsAppended)) - a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histogramsAppended)) - a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(oooFloatsAccepted)) - a.head.updateMinMaxTime(inOrderMint, inOrderMaxt) - a.head.updateMinOOOMaxOOOTime(oooMinT, oooMaxT) + defer func() { + for i := range acc.oooRecords { + a.head.putBytesBuffer(acc.oooRecords[i][:0]) + } + }() - collectOOORecords() + a.commitSamples(acc) + a.commitHistograms(acc) + a.commitFloatHistograms(acc) + a.commitMetadata() + 
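
For readers reviewing the Commit() refactor above: the per-type helpers (commitSamples, commitHistograms, commitFloatHistograms) accumulate counters and min/max timestamps into a shared appenderCommitContext, append errors are classified once in handleAppendableError, and the totals are flushed to the head metrics a single time afterwards. The following self-contained Go sketch illustrates that accumulator pattern in miniature; the commitCtx type and the local sentinel errors are illustrative stand-ins, not the real appenderCommitContext or the storage package's error values.

package main

import (
	"errors"
	"fmt"
	"math"
)

// Stand-ins for storage.ErrOutOfOrderSample, storage.ErrOutOfBounds and
// storage.ErrTooOldSample, which the real handleAppendableError matches on.
var (
	errOutOfOrder  = errors.New("out of order sample")
	errOutOfBounds = errors.New("out of bounds")
	errTooOld      = errors.New("too old sample")
)

// commitCtx mirrors the shape of appenderCommitContext: the appended counter
// starts at the number of pending samples and is decremented on rejection,
// and the min/max trackers start at the opposite extremes.
type commitCtx struct {
	appended, oooRejected, oobRejected, tooOldRejected int
	mint, maxt                                         int64
}

func newCommitCtx(pending int) *commitCtx {
	return &commitCtx{appended: pending, mint: math.MaxInt64, maxt: math.MinInt64}
}

// classify plays the role of handleAppendableError: every error costs one
// "appended", and the matching rejection counter is bumped.
func (c *commitCtx) classify(err error) {
	switch {
	case errors.Is(err, errOutOfOrder):
		c.appended--
		c.oooRejected++
	case errors.Is(err, errOutOfBounds):
		c.appended--
		c.oobRejected++
	case errors.Is(err, errTooOld):
		c.appended--
		c.tooOldRejected++
	default:
		c.appended--
	}
}

// observe tracks the min/max timestamps of accepted in-order samples.
func (c *commitCtx) observe(ts int64) {
	if ts < c.mint {
		c.mint = ts
	}
	if ts > c.maxt {
		c.maxt = ts
	}
}

func main() {
	samples := []struct {
		ts  int64
		err error
	}{
		{ts: 100}, {ts: 90, err: errOutOfOrder}, {ts: 110}, {ts: 5, err: errOutOfBounds},
	}
	acc := newCommitCtx(len(samples))
	for _, s := range samples {
		if s.err != nil {
			acc.classify(s.err)
			continue
		}
		acc.observe(s.ts)
	}
	// In the real Commit(), these totals are added to the head metrics once,
	// after all three commit helpers have run.
	fmt.Printf("appended=%d ooo=%d oob=%d tooOld=%d mint=%d maxt=%d\n",
		acc.appended, acc.oooRejected, acc.oobRejected, acc.tooOldRejected, acc.mint, acc.maxt)
}

Deferring the metric updates to a single flush after the helpers have run is what keeps the hot per-sample loops free of metric writes, which is the design point of moving all counters into the context struct.
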
+ a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOORejected)) + a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histoOOORejected)) + a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOBRejected)) + a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatTooOldRejected)) + a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatsAppended)) + a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histogramsAppended)) + a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.oooFloatsAccepted)) + a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.oooHistogramAccepted)) + a.head.updateMinMaxTime(acc.inOrderMint, acc.inOrderMaxt) + a.head.updateMinOOOMaxOOOTime(acc.oooMinT, acc.oooMaxT) + + acc.collectOOORecords(a) if a.head.wbl != nil { - if err := a.head.wbl.Log(oooRecords...); err != nil { + if err := a.head.wbl.Log(acc.oooRecords...); err != nil { // TODO(codesome): Currently WBL logging of ooo samples is best effort here since we cannot try logging // until we have found what samples become OOO. We can try having a metric for this failure. // Returning the error here is not correct because we have already put the samples into the memory, // hence the append/insert was a success. - level.Error(a.head.logger).Log("msg", "Failed to log out of order samples into the WAL", "err", err) + a.head.logger.Error("Failed to log out of order samples into the WAL", "err", err) } } return nil } // insert is like append, except it inserts. Used for OOO samples. -func (s *memSeries) insert(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64, logger log.Logger) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) { +func (s *memSeries) insert(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64, logger *slog.Logger) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) { if s.ooo == nil { s.ooo = &memSeriesOOOFields{} } @@ -1117,7 +1517,7 @@ type chunkOpts struct { // append adds the sample (t, v) to the series. The caller also has to provide // the appendID for isolation. (The appendID can be zero, which results in no // isolation for this append.) -// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. +// Series lock must be held when calling. func (s *memSeries) append(t int64, v float64, appendID uint64, o chunkOpts) (sampleInOrder, chunkCreated bool) { c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncXOR, o) if !sampleInOrder { @@ -1446,7 +1846,7 @@ func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkRange // cutNewOOOHeadChunk cuts a new OOO chunk and m-maps the old chunk. // The caller must ensure that s is locked and s.ooo is not nil. 
-func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper, logger log.Logger) (*oooHeadChunk, []chunks.ChunkDiskMapperRef) { +func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper, logger *slog.Logger) (*oooHeadChunk, []chunks.ChunkDiskMapperRef) { ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper, logger) s.ooo.oooHeadChunk = &oooHeadChunk{ @@ -1459,7 +1859,7 @@ func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.Chunk } // s must be locked when calling. -func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper, logger log.Logger) []chunks.ChunkDiskMapperRef { +func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper, logger *slog.Logger) []chunks.ChunkDiskMapperRef { if s.ooo == nil || s.ooo.oooHeadChunk == nil { // OOO is not enabled or there is no head chunk, so nothing to m-map here. return nil @@ -1469,13 +1869,13 @@ func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMap handleChunkWriteError(err) return nil } - chunkRefs := make([]chunks.ChunkDiskMapperRef, 0, 1) + chunkRefs := make([]chunks.ChunkDiskMapperRef, 0, len(chks)) for _, memchunk := range chks { if len(s.ooo.oooMmappedChunks) >= (oooChunkIDMask - 1) { - level.Error(logger).Log("msg", "Too many OOO chunks, dropping data", "series", s.lset.String()) + logger.Error("Too many OOO chunks, dropping data", "series", s.lset.String()) break } - chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, memchunk.chunk, true, handleChunkWriteError) + chunkRef := chunkDiskMapper.WriteChunk(s.ref, memchunk.minTime, memchunk.maxTime, memchunk.chunk, true, handleChunkWriteError) chunkRefs = append(chunkRefs, chunkRef) s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{ ref: chunkRef, diff --git a/tsdb/head_bench_test.go b/tsdb/head_bench_test.go index a037948100a..dc682602b12 100644 --- a/tsdb/head_bench_test.go +++ b/tsdb/head_bench_test.go @@ -14,15 +14,22 @@ package tsdb import ( + "context" "errors" + "fmt" + "math/rand" "strconv" "testing" "github.com/stretchr/testify/require" "go.uber.org/atomic" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/tsdb/wlog" ) func BenchmarkHeadStripeSeriesCreate(b *testing.B) { @@ -79,6 +86,86 @@ func BenchmarkHeadStripeSeriesCreate_PreCreationFailure(b *testing.B) { } } +func BenchmarkHead_WalCommit(b *testing.B) { + seriesCounts := []int{100, 1000, 10000} + series := genSeries(10000, 10, 0, 0) // Only using the generated labels. + + appendSamples := func(b *testing.B, app storage.Appender, seriesCount int, ts int64) { + var err error + for i, s := range series[:seriesCount] { + var ref storage.SeriesRef + // if i is even, append a sample, else append a histogram. 
+ if i%2 == 0 { + ref, err = app.Append(ref, s.Labels(), ts, float64(ts)) + } else { + h := &histogram.Histogram{ + Count: 7 + uint64(ts*5), + ZeroCount: 2 + uint64(ts), + ZeroThreshold: 0.001, + Sum: 18.4 * rand.Float64(), + Schema: 1, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{ts + 1, 1, -1, 0}, + } + ref, err = app.AppendHistogram(ref, s.Labels(), ts, h, nil) + } + require.NoError(b, err) + + _, err = app.AppendExemplar(ref, s.Labels(), exemplar.Exemplar{ + Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())), + Value: rand.Float64(), + Ts: ts, + }) + require.NoError(b, err) + } + } + + for _, seriesCount := range seriesCounts { + b.Run(fmt.Sprintf("%d series", seriesCount), func(b *testing.B) { + for _, commits := range []int64{1, 2} { // To test commits that create new series and when the series already exists. + b.Run(fmt.Sprintf("%d commits", commits), func(b *testing.B) { + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + b.StopTimer() + h, w := newTestHead(b, 10000, wlog.CompressionNone, false) + b.Cleanup(func() { + if h != nil { + h.Close() + } + if w != nil { + w.Close() + } + }) + app := h.Appender(context.Background()) + + appendSamples(b, app, seriesCount, 0) + + b.StartTimer() + require.NoError(b, app.Commit()) + if commits == 2 { + b.StopTimer() + app = h.Appender(context.Background()) + appendSamples(b, app, seriesCount, 1) + b.StartTimer() + require.NoError(b, app.Commit()) + } + b.StopTimer() + h.Close() + h = nil + w.Close() + w = nil + } + }) + } + }) + } +} + type failingSeriesLifecycleCallback struct{} func (failingSeriesLifecycleCallback) PreCreation(labels.Labels) error { return errors.New("failed") } diff --git a/tsdb/head_dedupelabels.go b/tsdb/head_dedupelabels.go index a16d9072612..a75f3372245 100644 --- a/tsdb/head_dedupelabels.go +++ b/tsdb/head_dedupelabels.go @@ -16,8 +16,7 @@ package tsdb import ( - "github.com/go-kit/log" - "github.com/go-kit/log/level" + "log/slog" "github.com/prometheus/prometheus/model/labels" ) @@ -31,8 +30,8 @@ func (s *memSeries) labels() labels.Labels { // RebuildSymbolTable goes through all the series in h, build a SymbolTable with all names and values, // replace each series' Labels with one using that SymbolTable. -func (h *Head) RebuildSymbolTable(logger log.Logger) *labels.SymbolTable { - level.Info(logger).Log("msg", "RebuildSymbolTable starting") +func (h *Head) RebuildSymbolTable(logger *slog.Logger) *labels.SymbolTable { + logger.Info("RebuildSymbolTable starting") st := labels.NewSymbolTable() builder := labels.NewScratchBuilderWithSymbolTable(st, 0) rebuildLabels := func(lbls labels.Labels) labels.Labels { @@ -66,7 +65,7 @@ func (h *Head) RebuildSymbolTable(logger log.Logger) *labels.SymbolTable { if e, ok := h.exemplars.(withReset); ok { e.ResetSymbolTable(st) } - level.Info(logger).Log("msg", "RebuildSymbolTable finished", "size", st.Len()) + logger.Info("RebuildSymbolTable finished", "size", st.Len()) return st } diff --git a/tsdb/head_other.go b/tsdb/head_other.go index fea91530dc7..c73872c12e1 100644 --- a/tsdb/head_other.go +++ b/tsdb/head_other.go @@ -16,7 +16,7 @@ package tsdb import ( - "github.com/go-kit/log" + "log/slog" "github.com/prometheus/prometheus/model/labels" ) @@ -27,6 +27,6 @@ func (s *memSeries) labels() labels.Labels { } // RebuildSymbolTable is a no-op when not using dedupelabels. 
-func (h *Head) RebuildSymbolTable(logger log.Logger) *labels.SymbolTable { +func (h *Head) RebuildSymbolTable(logger *slog.Logger) *labels.SymbolTable { return nil } diff --git a/tsdb/head_read.go b/tsdb/head_read.go index d81ffbb6a03..29adc3ee740 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -21,8 +21,6 @@ import ( "slices" "sync" - "github.com/go-kit/log/level" - "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -132,7 +130,7 @@ func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings { for p.Next() { s := h.head.series.getByID(chunks.HeadSeriesRef(p.At())) if s == nil { - level.Debug(h.head.logger).Log("msg", "Looked up series not found") + h.head.logger.Debug("Looked up series not found") } else { series = append(series, s) } @@ -165,7 +163,7 @@ func (h *headIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCou for p.Next() { s := h.head.series.getByID(chunks.HeadSeriesRef(p.At())) if s == nil { - level.Debug(h.head.logger).Log("msg", "Looked up series not found") + h.head.logger.Debug("Looked up series not found") continue } diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 7b5349cfcaa..cc9daa97fe1 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -23,6 +23,7 @@ import ( "path" "path/filepath" "reflect" + "slices" "sort" "strconv" "strings" @@ -1060,7 +1061,7 @@ func TestMemSeries_truncateChunks_scenarios(t *testing.T) { tests := []struct { name string - headChunks int // the number of head chubks to create on memSeries by appending enough samples + headChunks int // the number of head chunks to create on memSeries by appending enough samples mmappedChunks int // the number of mmapped chunks to create on memSeries by appending enough samples truncateBefore int64 // the mint to pass to truncateChunksBefore() expectedTruncated int // the number of chunks that we're expecting be truncated and returned by truncateChunksBefore() @@ -2101,6 +2102,36 @@ func TestHead_LogRollback(t *testing.T) { } } +func TestHead_ReturnsSortedLabelValues(t *testing.T) { + h, _ := newTestHead(t, 1000, wlog.CompressionNone, false) + defer func() { + require.NoError(t, h.Close()) + }() + + h.initTime(0) + + app := h.appender() + for i := 100; i > 0; i-- { + for j := 0; j < 10; j++ { + lset := labels.FromStrings( + "__name__", fmt.Sprintf("metric_%d", i), + "label", fmt.Sprintf("value_%d", j), + ) + _, err := app.Append(0, lset, 2100, 1) + require.NoError(t, err) + } + } + + q, err := NewBlockQuerier(h, 1500, 2500) + require.NoError(t, err) + + res, _, err := q.LabelValues(context.Background(), "__name__", nil) + require.NoError(t, err) + + require.True(t, slices.IsSorted(res)) + require.NoError(t, q.Close()) +} + // TestWalRepair_DecodingError ensures that a repair is run for an error // when decoding a record. 
func TestWalRepair_DecodingError(t *testing.T) { @@ -2383,8 +2414,7 @@ func TestAddDuplicateLabelName(t *testing.T) { add := func(labels labels.Labels, labelName string) { app := h.Appender(context.Background()) _, err := app.Append(0, labels, 0, 0) - require.Error(t, err) - require.Equal(t, fmt.Sprintf(`label name "%s" is not unique: invalid sample`, labelName), err.Error()) + require.EqualError(t, err, fmt.Sprintf(`label name "%s" is not unique: invalid sample`, labelName)) } add(labels.FromStrings("a", "c", "a", "b"), "a") @@ -2692,15 +2722,32 @@ func TestIsolationWithoutAdd(t *testing.T) { func TestOutOfOrderSamplesMetric(t *testing.T) { for name, scenario := range sampleTypeScenarios { t.Run(name, func(t *testing.T) { - testOutOfOrderSamplesMetric(t, scenario) + options := DefaultOptions() + options.EnableNativeHistograms = true + options.EnableOOONativeHistograms = true + testOutOfOrderSamplesMetric(t, scenario, options, storage.ErrOutOfOrderSample) }) } } -func testOutOfOrderSamplesMetric(t *testing.T, scenario sampleTypeScenario) { - dir := t.TempDir() +func TestOutOfOrderSamplesMetricNativeHistogramOOODisabled(t *testing.T) { + for name, scenario := range sampleTypeScenarios { + if scenario.sampleType != "histogram" { + continue + } + t.Run(name, func(t *testing.T) { + options := DefaultOptions() + options.OutOfOrderTimeWindow = (1000 * time.Minute).Milliseconds() + options.EnableNativeHistograms = true + options.EnableOOONativeHistograms = false + testOutOfOrderSamplesMetric(t, scenario, options, storage.ErrOOONativeHistogramsDisabled) + }) + } +} - db, err := Open(dir, nil, nil, DefaultOptions(), nil) +func testOutOfOrderSamplesMetric(t *testing.T, scenario sampleTypeScenario, options *Options, expectOutOfOrderError error) { + dir := t.TempDir() + db, err := Open(dir, nil, nil, options, nil) require.NoError(t, err) defer func() { require.NoError(t, db.Close()) @@ -2724,15 +2771,15 @@ func testOutOfOrderSamplesMetric(t *testing.T, scenario sampleTypeScenario) { require.Equal(t, 0.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType))) app = db.Appender(ctx) _, err = appendSample(app, 2) - require.Equal(t, storage.ErrOutOfOrderSample, err) + require.Equal(t, expectOutOfOrderError, err) require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType))) _, err = appendSample(app, 3) - require.Equal(t, storage.ErrOutOfOrderSample, err) + require.Equal(t, expectOutOfOrderError, err) require.Equal(t, 2.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType))) _, err = appendSample(app, 4) - require.Equal(t, storage.ErrOutOfOrderSample, err) + require.Equal(t, expectOutOfOrderError, err) require.Equal(t, 3.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType))) require.NoError(t, app.Commit()) @@ -2767,15 +2814,15 @@ func testOutOfOrderSamplesMetric(t *testing.T, scenario sampleTypeScenario) { // Test out of order metric. 
app = db.Appender(ctx) _, err = appendSample(app, db.head.minValidTime.Load()+DefaultBlockDuration+2) - require.Equal(t, storage.ErrOutOfOrderSample, err) + require.Equal(t, expectOutOfOrderError, err) require.Equal(t, 4.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType))) _, err = appendSample(app, db.head.minValidTime.Load()+DefaultBlockDuration+3) - require.Equal(t, storage.ErrOutOfOrderSample, err) + require.Equal(t, expectOutOfOrderError, err) require.Equal(t, 5.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType))) _, err = appendSample(app, db.head.minValidTime.Load()+DefaultBlockDuration+4) - require.Equal(t, storage.ErrOutOfOrderSample, err) + require.Equal(t, expectOutOfOrderError, err) require.Equal(t, 6.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType))) require.NoError(t, app.Commit()) } @@ -4626,10 +4673,172 @@ func TestHistogramCounterResetHeader(t *testing.T) { } } +func TestOOOHistogramCounterResetHeaders(t *testing.T) { + for _, floatHisto := range []bool{true, false} { + t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) { + l := labels.FromStrings("a", "b") + head, _ := newTestHead(t, 1000, wlog.CompressionNone, true) + head.opts.OutOfOrderCapMax.Store(5) + head.opts.EnableOOONativeHistograms.Store(true) + + t.Cleanup(func() { + require.NoError(t, head.Close()) + }) + require.NoError(t, head.Init(0)) + + appendHistogram := func(ts int64, h *histogram.Histogram) { + app := head.Appender(context.Background()) + var err error + if floatHisto { + _, err = app.AppendHistogram(0, l, ts, nil, h.ToFloat(nil)) + } else { + _, err = app.AppendHistogram(0, l, ts, h.Copy(), nil) + } + require.NoError(t, err) + require.NoError(t, app.Commit()) + } + + type expOOOMmappedChunks struct { + header chunkenc.CounterResetHeader + mint, maxt int64 + numSamples uint16 + } + + var expChunks []expOOOMmappedChunks + checkOOOExpCounterResetHeader := func(newChunks ...expOOOMmappedChunks) { + expChunks = append(expChunks, newChunks...) + + ms, _, err := head.getOrCreate(l.Hash(), l) + require.NoError(t, err) + + require.Len(t, ms.ooo.oooMmappedChunks, len(expChunks)) + + for i, mmapChunk := range ms.ooo.oooMmappedChunks { + chk, err := head.chunkDiskMapper.Chunk(mmapChunk.ref) + require.NoError(t, err) + if floatHisto { + require.Equal(t, expChunks[i].header, chk.(*chunkenc.FloatHistogramChunk).GetCounterResetHeader()) + } else { + require.Equal(t, expChunks[i].header, chk.(*chunkenc.HistogramChunk).GetCounterResetHeader()) + } + require.Equal(t, expChunks[i].mint, mmapChunk.minTime) + require.Equal(t, expChunks[i].maxt, mmapChunk.maxTime) + require.Equal(t, expChunks[i].numSamples, mmapChunk.numSamples) + } + } + + // Append an in-order histogram, so the rest of the samples can be detected as OOO. + appendHistogram(1000, tsdbutil.GenerateTestHistogram(1000)) + + // OOO histogram + for i := 1; i <= 5; i++ { + appendHistogram(100+int64(i), tsdbutil.GenerateTestHistogram(1000+i)) + } + // Nothing mmapped yet. + checkOOOExpCounterResetHeader() + + // 6th observation (which triggers a head chunk mmapping). + appendHistogram(int64(112), tsdbutil.GenerateTestHistogram(1002)) + + // One mmapped chunk with (ts, val) [(101, 1001), (102, 1002), (103, 1003), (104, 1004), (105, 1005)]. 
+ checkOOOExpCounterResetHeader(expOOOMmappedChunks{ + header: chunkenc.UnknownCounterReset, + mint: 101, + maxt: 105, + numSamples: 5, + }) + + // Add more samples, there's a counter reset at ts 122. + appendHistogram(int64(110), tsdbutil.GenerateTestHistogram(1001)) + appendHistogram(int64(124), tsdbutil.GenerateTestHistogram(904)) + appendHistogram(int64(123), tsdbutil.GenerateTestHistogram(903)) + appendHistogram(int64(122), tsdbutil.GenerateTestHistogram(902)) + + // New samples not mmapped yet. + checkOOOExpCounterResetHeader() + + // 11th observation (which triggers another head chunk mmapping). + appendHistogram(int64(200), tsdbutil.GenerateTestHistogram(2000)) + + // Two new mmapped chunks [(110, 1001), (112, 1002)], [(122, 902), (123, 903), (124, 904)]. + checkOOOExpCounterResetHeader( + expOOOMmappedChunks{ + header: chunkenc.UnknownCounterReset, + mint: 110, + maxt: 112, + numSamples: 2, + }, + expOOOMmappedChunks{ + header: chunkenc.CounterReset, + mint: 122, + maxt: 124, + numSamples: 3, + }, + ) + + // Count is lower than previous sample at ts 200, and NotCounterReset is always ignored on append. + appendHistogram(int64(205), tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(1000))) + + appendHistogram(int64(210), tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(2010))) + + appendHistogram(int64(220), tsdbutil.GenerateTestHistogram(2020)) + + appendHistogram(int64(215), tsdbutil.GenerateTestHistogram(2005)) + + // 16th observation (which triggers another head chunk mmapping). + appendHistogram(int64(350), tsdbutil.GenerateTestHistogram(4000)) + + // Four new mmapped chunks: [(200, 2000)] [(205, 1000)], [(210, 2010)], [(215, 2015), (220, 2020)] + checkOOOExpCounterResetHeader( + expOOOMmappedChunks{ + header: chunkenc.UnknownCounterReset, + mint: 200, + maxt: 200, + numSamples: 1, + }, + expOOOMmappedChunks{ + header: chunkenc.CounterReset, + mint: 205, + maxt: 205, + numSamples: 1, + }, + expOOOMmappedChunks{ + header: chunkenc.CounterReset, + mint: 210, + maxt: 210, + numSamples: 1, + }, + expOOOMmappedChunks{ + header: chunkenc.CounterReset, + mint: 215, + maxt: 220, + numSamples: 2, + }, + ) + + // Adding five more samples (21 in total), so another mmapped chunk is created. + appendHistogram(300, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(3000))) + + for i := 1; i <= 4; i++ { + appendHistogram(300+int64(i), tsdbutil.GenerateTestHistogram(3000+i)) + } + + // One mmapped chunk with (ts, val) [(300, 3000), (301, 3001), (302, 3002), (303, 3003), (350, 4000)]. 
+ checkOOOExpCounterResetHeader(expOOOMmappedChunks{ + header: chunkenc.CounterReset, + mint: 300, + maxt: 350, + numSamples: 5, + }) + }) + } +} + func TestAppendingDifferentEncodingToSameSeries(t *testing.T) { dir := t.TempDir() opts := DefaultOptions() opts.EnableNativeHistograms = true + opts.EnableOOONativeHistograms = true db, err := Open(dir, nil, nil, opts, nil) require.NoError(t, err) t.Cleanup(func() { @@ -4900,6 +5109,8 @@ func testWBLReplay(t *testing.T, scenario sampleTypeScenario) { opts.ChunkRange = 1000 opts.ChunkDirRoot = dir opts.OutOfOrderTimeWindow.Store(30 * time.Minute.Milliseconds()) + opts.EnableNativeHistograms.Store(true) + opts.EnableOOONativeHistograms.Store(true) h, err := NewHead(nil, nil, wal, oooWlog, opts, nil) require.NoError(t, err) @@ -4909,13 +5120,12 @@ func testWBLReplay(t *testing.T, scenario sampleTypeScenario) { l := labels.FromStrings("foo", "bar") appendSample := func(mins int64, val float64, isOOO bool) { app := h.Appender(context.Background()) - ts, v := mins*time.Minute.Milliseconds(), val - _, err := app.Append(0, l, ts, v) + _, s, err := scenario.appendFunc(app, l, mins*time.Minute.Milliseconds(), mins) require.NoError(t, err) require.NoError(t, app.Commit()) if isOOO { - expOOOSamples = append(expOOOSamples, sample{t: ts, f: v}) + expOOOSamples = append(expOOOSamples, s) } } @@ -4968,7 +5178,7 @@ func testWBLReplay(t *testing.T, scenario sampleTypeScenario) { // Passing in true for the 'ignoreCounterResets' parameter prevents differences in counter reset headers // from being factored in to the sample comparison // TODO(fionaliao): understand counter reset behaviour, might want to modify this later - requireEqualSamples(t, l.String(), expOOOSamples, actOOOSamples, true) + requireEqualSamples(t, l.String(), expOOOSamples, actOOOSamples, requireEqualSamplesIgnoreCounterResets) require.NoError(t, h.Close()) } @@ -4994,6 +5204,8 @@ func testOOOMmapReplay(t *testing.T, scenario sampleTypeScenario) { opts.ChunkDirRoot = dir opts.OutOfOrderCapMax.Store(30) opts.OutOfOrderTimeWindow.Store(1000 * time.Minute.Milliseconds()) + opts.EnableNativeHistograms.Store(true) + opts.EnableOOONativeHistograms.Store(true) h, err := NewHead(nil, nil, wal, oooWlog, opts, nil) require.NoError(t, err) @@ -5295,6 +5507,8 @@ func testOOOAppendWithNoSeries(t *testing.T, appendFunc func(appender storage.Ap opts.ChunkDirRoot = dir opts.OutOfOrderCapMax.Store(30) opts.OutOfOrderTimeWindow.Store(120 * time.Minute.Milliseconds()) + opts.EnableNativeHistograms.Store(true) + opts.EnableOOONativeHistograms.Store(true) h, err := NewHead(nil, nil, wal, oooWlog, opts, nil) require.NoError(t, err) @@ -5368,7 +5582,9 @@ func testOOOAppendWithNoSeries(t *testing.T, appendFunc func(appender storage.Ap func TestHeadMinOOOTimeUpdate(t *testing.T) { for name, scenario := range sampleTypeScenarios { t.Run(name, func(t *testing.T) { - testHeadMinOOOTimeUpdate(t, scenario) + if scenario.sampleType == sampleMetricTypeFloat { + testHeadMinOOOTimeUpdate(t, scenario) + } }) } } @@ -5383,6 +5599,8 @@ func testHeadMinOOOTimeUpdate(t *testing.T, scenario sampleTypeScenario) { opts := DefaultHeadOptions() opts.ChunkDirRoot = dir opts.OutOfOrderTimeWindow.Store(10 * time.Minute.Milliseconds()) + opts.EnableNativeHistograms.Store(true) + opts.EnableOOONativeHistograms.Store(true) h, err := NewHead(nil, nil, wal, oooWlog, opts, nil) require.NoError(t, err) @@ -6062,11 +6280,15 @@ func TestHeadAppender_AppendFloatWithSameTimestampAsPreviousHistogram(t *testing require.ErrorIs(t, err, 
storage.NewDuplicateHistogramToFloatErr(2_000, 10.0)) } -func TestHeadAppender_AppendCTZeroSample(t *testing.T) { +func TestHeadAppender_AppendCT(t *testing.T) { + testHistogram := tsdbutil.GenerateTestHistogram(1) + testFloatHistogram := tsdbutil.GenerateTestFloatHistogram(1) type appendableSamples struct { - ts int64 - val float64 - ct int64 + ts int64 + fSample float64 + h *histogram.Histogram + fh *histogram.FloatHistogram + ct int64 } for _, tc := range []struct { name string @@ -6074,20 +6296,54 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) { expectedSamples []chunks.Sample }{ { - name: "In order ct+normal sample", + name: "In order ct+normal sample/floatSample", appendableSamples: []appendableSamples{ - {ts: 100, val: 10, ct: 1}, + {ts: 100, fSample: 10, ct: 1}, + {ts: 101, fSample: 10, ct: 1}, }, expectedSamples: []chunks.Sample{ sample{t: 1, f: 0}, sample{t: 100, f: 10}, + sample{t: 101, f: 10}, + }, + }, + { + name: "In order ct+normal sample/histogram", + appendableSamples: []appendableSamples{ + {ts: 100, h: testHistogram, ct: 1}, + {ts: 101, h: testHistogram, ct: 1}, }, + expectedSamples: func() []chunks.Sample { + hNoCounterReset := *testHistogram + hNoCounterReset.CounterResetHint = histogram.NotCounterReset + return []chunks.Sample{ + sample{t: 1, h: &histogram.Histogram{}}, + sample{t: 100, h: testHistogram}, + sample{t: 101, h: &hNoCounterReset}, + } + }(), }, { - name: "Consecutive appends with same ct ignore ct", + name: "In order ct+normal sample/floathistogram", appendableSamples: []appendableSamples{ - {ts: 100, val: 10, ct: 1}, - {ts: 101, val: 10, ct: 1}, + {ts: 100, fh: testFloatHistogram, ct: 1}, + {ts: 101, fh: testFloatHistogram, ct: 1}, + }, + expectedSamples: func() []chunks.Sample { + fhNoCounterReset := *testFloatHistogram + fhNoCounterReset.CounterResetHint = histogram.NotCounterReset + return []chunks.Sample{ + sample{t: 1, fh: &histogram.FloatHistogram{}}, + sample{t: 100, fh: testFloatHistogram}, + sample{t: 101, fh: &fhNoCounterReset}, + } + }(), + }, + { + name: "Consecutive appends with same ct ignore ct/floatSample", + appendableSamples: []appendableSamples{ + {ts: 100, fSample: 10, ct: 1}, + {ts: 101, fSample: 10, ct: 1}, }, expectedSamples: []chunks.Sample{ sample{t: 1, f: 0}, @@ -6096,10 +6352,42 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) { }, }, { - name: "Consecutive appends with newer ct do not ignore ct", + name: "Consecutive appends with same ct ignore ct/histogram", + appendableSamples: []appendableSamples{ + {ts: 100, h: testHistogram, ct: 1}, + {ts: 101, h: testHistogram, ct: 1}, + }, + expectedSamples: func() []chunks.Sample { + hNoCounterReset := *testHistogram + hNoCounterReset.CounterResetHint = histogram.NotCounterReset + return []chunks.Sample{ + sample{t: 1, h: &histogram.Histogram{}}, + sample{t: 100, h: testHistogram}, + sample{t: 101, h: &hNoCounterReset}, + } + }(), + }, + { + name: "Consecutive appends with same ct ignore ct/floathistogram", + appendableSamples: []appendableSamples{ + {ts: 100, fh: testFloatHistogram, ct: 1}, + {ts: 101, fh: testFloatHistogram, ct: 1}, + }, + expectedSamples: func() []chunks.Sample { + fhNoCounterReset := *testFloatHistogram + fhNoCounterReset.CounterResetHint = histogram.NotCounterReset + return []chunks.Sample{ + sample{t: 1, fh: &histogram.FloatHistogram{}}, + sample{t: 100, fh: testFloatHistogram}, + sample{t: 101, fh: &fhNoCounterReset}, + } + }(), + }, + { + name: "Consecutive appends with newer ct do not ignore ct/floatSample", appendableSamples: 
[]appendableSamples{ - {ts: 100, val: 10, ct: 1}, - {ts: 102, val: 10, ct: 101}, + {ts: 100, fSample: 10, ct: 1}, + {ts: 102, fSample: 10, ct: 101}, }, expectedSamples: []chunks.Sample{ sample{t: 1, f: 0}, @@ -6109,10 +6397,36 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) { }, }, { - name: "CT equals to previous sample timestamp is ignored", + name: "Consecutive appends with newer ct do not ignore ct/histogram", + appendableSamples: []appendableSamples{ + {ts: 100, h: testHistogram, ct: 1}, + {ts: 102, h: testHistogram, ct: 101}, + }, + expectedSamples: []chunks.Sample{ + sample{t: 1, h: &histogram.Histogram{}}, + sample{t: 100, h: testHistogram}, + sample{t: 101, h: &histogram.Histogram{CounterResetHint: histogram.CounterReset}}, + sample{t: 102, h: testHistogram}, + }, + }, + { + name: "Consecutive appends with newer ct do not ignore ct/floathistogram", appendableSamples: []appendableSamples{ - {ts: 100, val: 10, ct: 1}, - {ts: 101, val: 10, ct: 100}, + {ts: 100, fh: testFloatHistogram, ct: 1}, + {ts: 102, fh: testFloatHistogram, ct: 101}, + }, + expectedSamples: []chunks.Sample{ + sample{t: 1, fh: &histogram.FloatHistogram{}}, + sample{t: 100, fh: testFloatHistogram}, + sample{t: 101, fh: &histogram.FloatHistogram{CounterResetHint: histogram.CounterReset}}, + sample{t: 102, fh: testFloatHistogram}, + }, + }, + { + name: "CT equals to previous sample timestamp is ignored/floatSample", + appendableSamples: []appendableSamples{ + {ts: 100, fSample: 10, ct: 1}, + {ts: 101, fSample: 10, ct: 100}, }, expectedSamples: []chunks.Sample{ sample{t: 1, f: 0}, @@ -6120,6 +6434,38 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) { sample{t: 101, f: 10}, }, }, + { + name: "CT equals to previous sample timestamp is ignored/histogram", + appendableSamples: []appendableSamples{ + {ts: 100, h: testHistogram, ct: 1}, + {ts: 101, h: testHistogram, ct: 100}, + }, + expectedSamples: func() []chunks.Sample { + hNoCounterReset := *testHistogram + hNoCounterReset.CounterResetHint = histogram.NotCounterReset + return []chunks.Sample{ + sample{t: 1, h: &histogram.Histogram{}}, + sample{t: 100, h: testHistogram}, + sample{t: 101, h: &hNoCounterReset}, + } + }(), + }, + { + name: "CT equals to previous sample timestamp is ignored/floathistogram", + appendableSamples: []appendableSamples{ + {ts: 100, fh: testFloatHistogram, ct: 1}, + {ts: 101, fh: testFloatHistogram, ct: 100}, + }, + expectedSamples: func() []chunks.Sample { + fhNoCounterReset := *testFloatHistogram + fhNoCounterReset.CounterResetHint = histogram.NotCounterReset + return []chunks.Sample{ + sample{t: 1, fh: &histogram.FloatHistogram{}}, + sample{t: 100, fh: testFloatHistogram}, + sample{t: 101, fh: &fhNoCounterReset}, + } + }(), + }, } { t.Run(tc.name, func(t *testing.T) { h, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) @@ -6129,10 +6475,21 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) { a := h.Appender(context.Background()) lbls := labels.FromStrings("foo", "bar") for _, sample := range tc.appendableSamples { - _, err := a.AppendCTZeroSample(0, lbls, sample.ts, sample.ct) - require.NoError(t, err) - _, err = a.Append(0, lbls, sample.ts, sample.val) - require.NoError(t, err) + // Append float if it's a float test case + if sample.fSample != 0 { + _, err := a.AppendCTZeroSample(0, lbls, sample.ts, sample.ct) + require.NoError(t, err) + _, err = a.Append(0, lbls, sample.ts, sample.fSample) + require.NoError(t, err) + } + + // Append histograms if it's a histogram test case + if sample.h != nil 
|| sample.fh != nil { + ref, err := a.AppendHistogramCTZeroSample(0, lbls, sample.ts, sample.ct, sample.h, sample.fh) + require.NoError(t, err) + _, err = a.AppendHistogram(ref, lbls, sample.ts, sample.h, sample.fh) + require.NoError(t, err) + } } require.NoError(t, a.Commit()) @@ -6167,3 +6524,60 @@ func (c *countSeriesLifecycleCallback) PostCreation(labels.Labels) { c.crea func (c *countSeriesLifecycleCallback) PostDeletion(s map[chunks.HeadSeriesRef]labels.Labels) { c.deleted.Add(int64(len(s))) } + +// Regression test for data race https://github.com/prometheus/prometheus/issues/15139. +func TestHeadAppendHistogramAndCommitConcurrency(t *testing.T) { + h := tsdbutil.GenerateTestHistogram(1) + fh := tsdbutil.GenerateTestFloatHistogram(1) + + testCases := map[string]func(storage.Appender, int) error{ + "integer histogram": func(app storage.Appender, i int) error { + _, err := app.AppendHistogram(0, labels.FromStrings("foo", "bar", "serial", strconv.Itoa(i)), 1, h, nil) + return err + }, + "float histogram": func(app storage.Appender, i int) error { + _, err := app.AppendHistogram(0, labels.FromStrings("foo", "bar", "serial", strconv.Itoa(i)), 1, nil, fh) + return err + }, + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + testHeadAppendHistogramAndCommitConcurrency(t, tc) + }) + } +} + +func testHeadAppendHistogramAndCommitConcurrency(t *testing.T, appendFn func(storage.Appender, int) error) { + head, _ := newTestHead(t, 1000, wlog.CompressionNone, false) + defer func() { + require.NoError(t, head.Close()) + }() + + wg := sync.WaitGroup{} + wg.Add(2) + + // How this works: Commit() should be atomic, thus one of the commits will + // be first and the other second. The first commit will create a new series + // and write a sample. The second commit will see an exact duplicate sample + // which it should ignore. Unless there's a race that causes the + // memSeries.lastHistogram to be corrupt and fail the duplicate check. + go func() { + defer wg.Done() + for i := 0; i < 10000; i++ { + app := head.Appender(context.Background()) + require.NoError(t, appendFn(app, i)) + require.NoError(t, app.Commit()) + } + }() + + go func() { + defer wg.Done() + for i := 0; i < 10000; i++ { + app := head.Appender(context.Background()) + require.NoError(t, appendFn(app, i)) + require.NoError(t, app.Commit()) + } + }() + + wg.Wait() +} diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index ef96b533050..8103926dc62 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -24,7 +24,6 @@ import ( "sync" "time" - "github.com/go-kit/log/level" "go.uber.org/atomic" "github.com/prometheus/prometheus/model/exemplar" @@ -128,7 +127,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch // replaying the WAL, so lets just log the error if it's not that type. 
err = h.exemplars.AddExemplar(ms.labels(), exemplar.Exemplar{Ts: e.T, Value: e.V, Labels: e.Labels}) if err != nil && errors.Is(err, storage.ErrOutOfOrderExemplar) { - level.Warn(h.logger).Log("msg", "Unexpected error when replaying WAL on exemplar record", "err", err) + h.logger.Warn("Unexpected error when replaying WAL on exemplar record", "err", err) } } }(exemplarsInput) @@ -421,8 +420,8 @@ Outer: } if unknownRefs.Load()+unknownExemplarRefs.Load()+unknownHistogramRefs.Load()+unknownMetadataRefs.Load() > 0 { - level.Warn(h.logger).Log( - "msg", "Unknown series references", + h.logger.Warn( + "Unknown series references", "samples", unknownRefs.Load(), "exemplars", unknownExemplarRefs.Load(), "histograms", unknownHistogramRefs.Load(), @@ -430,7 +429,7 @@ Outer: ) } if count := mmapOverlappingChunks.Load(); count > 0 { - level.Info(h.logger).Log("msg", "Overlapping m-map chunks on duplicate series records", "count", count) + h.logger.Info("Overlapping m-map chunks on duplicate series records", "count", count) } return nil } @@ -446,8 +445,8 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*m mmc[0].minTime, mmc[len(mmc)-1].maxTime, ) { - level.Debug(h.logger).Log( - "msg", "M-mapped chunks overlap on a duplicate series record", + h.logger.Debug( + "M-mapped chunks overlap on a duplicate series record", "series", mSeries.labels().String(), "oldref", mSeries.ref, "oldmint", mSeries.mmappedChunks[0].minTime, @@ -646,9 +645,9 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp } func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, lastMmapRef chunks.ChunkDiskMapperRef) (err error) { - // Track number of samples, m-map markers, that referenced a series we don't know about + // Track number of samples, histogram samples, m-map markers, that referenced a series we don't know about // for error reporting. - var unknownRefs, mmapMarkerUnknownRefs atomic.Uint64 + var unknownRefs, unknownHistogramRefs, mmapMarkerUnknownRefs atomic.Uint64 lastSeq, lastOff := lastMmapRef.Unpack() // Start workers that each process samples for a partition of the series ID space. 
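
The logging changes in these hunks follow one mechanical pattern: go-kit calls of the form level.Warn(h.logger).Log("msg", ..., "err", err) become h.logger.Warn(..., "err", err) on a standard library *slog.Logger. A minimal stand-alone sketch of the new call style, reusing messages that appear in the surrounding hunks:

package main

import (
	"log/slog"
	"os"
)

func main() {
	// The head now carries a *slog.Logger instead of a go-kit log.Logger.
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

	// Old style (go-kit):   level.Warn(logger).Log("msg", "...", "samples", n)
	// New style (log/slog): the message is a positional argument, followed by
	// alternating key/value pairs, exactly as in the replacements above.
	unknownRefs := 3
	logger.Warn("Unknown series references for ooo WAL replay",
		"samples", unknownRefs, "mmap_markers", 0)
	logger.Info("chunk snapshot complete", "duration", "1.2s", "num_series", 1000)
}
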
@@ -657,8 +656,9 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch concurrency = h.opts.WALReplayConcurrency processors = make([]wblSubsetProcessor, concurrency) - dec = record.NewDecoder(syms) - shards = make([][]record.RefSample, concurrency) + dec record.Decoder + shards = make([][]record.RefSample, concurrency) + histogramShards = make([][]histogramRecord, concurrency) decodedCh = make(chan interface{}, 10) decodeErr error @@ -672,6 +672,16 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return []record.RefMmapMarker{} }, } + histogramSamplesPool = sync.Pool{ + New: func() interface{} { + return []record.RefHistogramSample{} + }, + } + floatHistogramSamplesPool = sync.Pool{ + New: func() interface{} { + return []record.RefFloatHistogramSample{} + }, + } ) defer func() { @@ -692,8 +702,9 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch processors[i].setup() go func(wp *wblSubsetProcessor) { - unknown := wp.processWBLSamples(h) + unknown, unknownHistograms := wp.processWBLSamples(h) unknownRefs.Add(unknown) + unknownHistogramRefs.Add(unknownHistograms) wg.Done() }(&processors[i]) } @@ -727,6 +738,30 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return } decodedCh <- markers + case record.HistogramSamples: + hists := histogramSamplesPool.Get().([]record.RefHistogramSample)[:0] + hists, err = dec.HistogramSamples(rec, hists) + if err != nil { + decodeErr = &wlog.CorruptionErr{ + Err: fmt.Errorf("decode histograms: %w", err), + Segment: r.Segment(), + Offset: r.Offset(), + } + return + } + decodedCh <- hists + case record.FloatHistogramSamples: + hists := floatHistogramSamplesPool.Get().([]record.RefFloatHistogramSample)[:0] + hists, err = dec.FloatHistogramSamples(rec, hists) + if err != nil { + decodeErr = &wlog.CorruptionErr{ + Err: fmt.Errorf("decode float histograms: %w", err), + Segment: r.Segment(), + Offset: r.Offset(), + } + return + } + decodedCh <- hists default: // Noop. } @@ -791,6 +826,70 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch idx := uint64(ms.ref) % uint64(concurrency) processors[idx].input <- wblSubsetProcessorInputItem{mmappedSeries: ms} } + case []record.RefHistogramSample: + samples := v + // We split up the samples into chunks of 5000 samples or less. + // With O(300 * #cores) in-flight sample batches, large scrapes could otherwise + // cause thousands of very large in flight buffers occupying large amounts + // of unused memory. + for len(samples) > 0 { + m := 5000 + if len(samples) < m { + m = len(samples) + } + for i := 0; i < concurrency; i++ { + if histogramShards[i] == nil { + histogramShards[i] = processors[i].reuseHistogramBuf() + } + } + for _, sam := range samples[:m] { + if r, ok := multiRef[sam.Ref]; ok { + sam.Ref = r + } + mod := uint64(sam.Ref) % uint64(concurrency) + histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, h: sam.H}) + } + for i := 0; i < concurrency; i++ { + if len(histogramShards[i]) > 0 { + processors[i].input <- wblSubsetProcessorInputItem{histogramSamples: histogramShards[i]} + histogramShards[i] = nil + } + } + samples = samples[m:] + } + histogramSamplesPool.Put(v) //nolint:staticcheck + case []record.RefFloatHistogramSample: + samples := v + // We split up the samples into chunks of 5000 samples or less. 
+ // With O(300 * #cores) in-flight sample batches, large scrapes could otherwise + // cause thousands of very large in flight buffers occupying large amounts + // of unused memory. + for len(samples) > 0 { + m := 5000 + if len(samples) < m { + m = len(samples) + } + for i := 0; i < concurrency; i++ { + if histogramShards[i] == nil { + histogramShards[i] = processors[i].reuseHistogramBuf() + } + } + for _, sam := range samples[:m] { + if r, ok := multiRef[sam.Ref]; ok { + sam.Ref = r + } + mod := uint64(sam.Ref) % uint64(concurrency) + histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, fh: sam.FH}) + } + for i := 0; i < concurrency; i++ { + if len(histogramShards[i]) > 0 { + processors[i].input <- wblSubsetProcessorInputItem{histogramSamples: histogramShards[i]} + histogramShards[i] = nil + } + } + samples = samples[m:] + } + floatHistogramSamplesPool.Put(v) //nolint:staticcheck default: panic(fmt.Errorf("unexpected decodedCh type: %T", d)) } @@ -811,7 +910,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } if unknownRefs.Load() > 0 || mmapMarkerUnknownRefs.Load() > 0 { - level.Warn(h.logger).Log("msg", "Unknown series references for ooo WAL replay", "samples", unknownRefs.Load(), "mmap_markers", mmapMarkerUnknownRefs.Load()) + h.logger.Warn("Unknown series references for ooo WAL replay", "samples", unknownRefs.Load(), "mmap_markers", mmapMarkerUnknownRefs.Load()) } return nil } @@ -833,17 +932,20 @@ func (e errLoadWbl) Unwrap() error { } type wblSubsetProcessor struct { - input chan wblSubsetProcessorInputItem - output chan []record.RefSample + input chan wblSubsetProcessorInputItem + output chan []record.RefSample + histogramsOutput chan []histogramRecord } type wblSubsetProcessorInputItem struct { - mmappedSeries *memSeries - samples []record.RefSample + mmappedSeries *memSeries + samples []record.RefSample + histogramSamples []histogramRecord } func (wp *wblSubsetProcessor) setup() { wp.output = make(chan []record.RefSample, 300) + wp.histogramsOutput = make(chan []histogramRecord, 300) wp.input = make(chan wblSubsetProcessorInputItem, 300) } @@ -851,6 +953,8 @@ func (wp *wblSubsetProcessor) closeAndDrain() { close(wp.input) for range wp.output { } + for range wp.histogramsOutput { + } } // If there is a buffer in the output chan, return it for reuse, otherwise return nil. @@ -863,10 +967,21 @@ func (wp *wblSubsetProcessor) reuseBuf() []record.RefSample { return nil } +// If there is a buffer in the output chan, return it for reuse, otherwise return nil. +func (wp *wblSubsetProcessor) reuseHistogramBuf() []histogramRecord { + select { + case buf := <-wp.histogramsOutput: + return buf[:0] + default: + } + return nil +} + // processWBLSamples adds the samples it receives to the head and passes // the buffer received to an output channel for reuse. -func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs uint64) { +func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs, unknownHistogramRefs uint64) { defer close(wp.output) + defer close(wp.histogramsOutput) oooCapMax := h.opts.OutOfOrderCapMax.Load() // We don't check for minValidTime for ooo samples. 
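The new histogram paths reuse the existing replay fan-out: decoded samples are cut into batches of at most 5000 and routed to the worker that owns ref % concurrency, with shard buffers handed to the worker's input channel (and, in the real code, reclaimed through sync.Pool and reuseHistogramBuf). A compact, self-contained sketch of that sharding loop; sampleRec and the channel layout are stand-ins, not the patch's record types:

package main

import "fmt"

type sampleRec struct {
	ref uint64
	t   int64
}

// shard distributes samples across workers by series reference, in batches
// of at most batchSize, mirroring the replay fan-out pattern above.
func shard(samples []sampleRec, workers []chan []sampleRec, batchSize int) {
	n := len(workers)
	shards := make([][]sampleRec, n)
	for len(samples) > 0 {
		m := batchSize
		if len(samples) < m {
			m = len(samples)
		}
		for _, s := range samples[:m] {
			i := s.ref % uint64(n)
			shards[i] = append(shards[i], s)
		}
		for i := range shards {
			if len(shards[i]) > 0 {
				workers[i] <- shards[i]
				shards[i] = nil // the worker now owns this buffer
			}
		}
		samples = samples[m:]
	}
}

func main() {
	workers := make([]chan []sampleRec, 2)
	for i := range workers {
		workers[i] = make(chan []sampleRec, 4)
	}
	shard([]sampleRec{{ref: 1}, {ref: 2}, {ref: 3}}, workers, 2)
	fmt.Println(len(workers[0]), len(workers[1]))
}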
@@ -905,11 +1020,41 @@ func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs uint64) { case wp.output <- in.samples: default: } + for _, s := range in.histogramSamples { + ms := h.series.getByID(s.ref) + if ms == nil { + unknownHistogramRefs++ + continue + } + var chunkCreated bool + var ok bool + if s.h != nil { + ok, chunkCreated, _ = ms.insert(s.t, 0, s.h, nil, h.chunkDiskMapper, oooCapMax, h.logger) + } else { + ok, chunkCreated, _ = ms.insert(s.t, 0, nil, s.fh, h.chunkDiskMapper, oooCapMax, h.logger) + } + if chunkCreated { + h.metrics.chunksCreated.Inc() + h.metrics.chunks.Inc() + } + if ok { + if s.t > maxt { + maxt = s.t + } + if s.t < mint { + mint = s.t + } + } + } + select { + case wp.histogramsOutput <- in.histogramSamples: + default: + } } h.updateMinOOOMaxOOOTime(mint, maxt) - return unknownRefs + return unknownRefs, unknownHistogramRefs } const ( @@ -1066,7 +1211,7 @@ const chunkSnapshotPrefix = "chunk_snapshot." func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { if h.wal == nil { // If we are not storing any WAL, does not make sense to take a snapshot too. - level.Warn(h.logger).Log("msg", "skipping chunk snapshotting as WAL is disabled") + h.logger.Warn("skipping chunk snapshotting as WAL is disabled") return &ChunkSnapshotStats{}, nil } h.chunkSnapshotMtx.Lock() @@ -1215,7 +1360,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { // Leftover old chunk snapshots do not cause problems down the line beyond // occupying disk space. // They will just be ignored since a higher chunk snapshot exists. - level.Error(h.logger).Log("msg", "delete old chunk snapshots", "err", err) + h.logger.Error("delete old chunk snapshots", "err", err) } return stats, nil } @@ -1225,12 +1370,12 @@ func chunkSnapshotDir(wlast, woffset int) string { } func (h *Head) performChunkSnapshot() error { - level.Info(h.logger).Log("msg", "creating chunk snapshot") + h.logger.Info("creating chunk snapshot") startTime := time.Now() stats, err := h.ChunkSnapshot() elapsed := time.Since(startTime) if err == nil { - level.Info(h.logger).Log("msg", "chunk snapshot complete", "duration", elapsed.String(), "num_series", stats.TotalSeries, "dir", stats.Dir) + h.logger.Info("chunk snapshot complete", "duration", elapsed.String(), "num_series", stats.TotalSeries, "dir", stats.Dir) } if err != nil { return fmt.Errorf("chunk snapshot: %w", err) @@ -1345,7 +1490,7 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie } defer func() { if err := sr.Close(); err != nil { - level.Warn(h.logger).Log("msg", "error while closing the wal segments reader", "err", err) + h.logger.Warn("error while closing the wal segments reader", "err", err) } }() @@ -1534,9 +1679,9 @@ Outer: } elapsed := time.Since(start) - level.Info(h.logger).Log("msg", "chunk snapshot loaded", "dir", dir, "num_series", numSeries, "duration", elapsed.String()) + h.logger.Info("chunk snapshot loaded", "dir", dir, "num_series", numSeries, "duration", elapsed.String()) if unknownRefs > 0 { - level.Warn(h.logger).Log("msg", "unknown series references during chunk snapshot replay", "count", unknownRefs) + h.logger.Warn("unknown series references during chunk snapshot replay", "count", unknownRefs) } return snapIdx, snapOffset, refSeries, nil diff --git a/tsdb/index/index.go b/tsdb/index/index.go index db0b9b88b8f..8c0f698eaec 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -438,7 +438,7 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ... 
return err } if labels.Compare(lset, w.lastSeries) <= 0 { - return fmt.Errorf("out-of-order series added with label set %q", lset) + return fmt.Errorf("out-of-order series added with label set %q, last label set %q", lset, w.lastSeries) } if ref < w.lastSeriesRef && !w.lastSeries.IsEmpty() { @@ -2067,5 +2067,5 @@ func (dec *Decoder) Series(b []byte, builder *labels.ScratchBuilder, chks *[]chu } func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) + return unsafe.String(unsafe.SliceData(b), len(b)) } diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index bfe74c323d4..5ed41f76988 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -345,13 +345,22 @@ func (p *MemPostings) Add(id storage.SeriesRef, lset labels.Labels) { p.mtx.Unlock() } +func appendWithExponentialGrowth[T any](a []T, v T) []T { + if cap(a) < len(a)+1 { + newList := make([]T, len(a), len(a)*2+1) + copy(newList, a) + a = newList + } + return append(a, v) +} + func (p *MemPostings) addFor(id storage.SeriesRef, l labels.Label) { nm, ok := p.m[l.Name] if !ok { nm = map[string][]storage.SeriesRef{} p.m[l.Name] = nm } - list := append(nm[l.Value], id) + list := appendWithExponentialGrowth(nm[l.Value], id) nm[l.Value] = list if !p.ordered { diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index 6b54aeb7d53..26cd4d057e9 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -112,7 +112,7 @@ func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmap return nil } for _, chk := range chks { - addChunk(c.minTime, c.maxTime, ref, chk.chunk) + addChunk(chk.minTime, chk.maxTime, ref, chk.chunk) } } else { var emptyChunk chunkenc.Chunk diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go index b9f2133eafd..17f551dd7d8 100644 --- a/tsdb/ooo_head_read_test.go +++ b/tsdb/ooo_head_read_test.go @@ -389,6 +389,7 @@ func TestOOOHeadChunkReader_LabelValues(t *testing.T) { func testOOOHeadChunkReader_LabelValues(t *testing.T, scenario sampleTypeScenario) { chunkRange := int64(2000) head, _ := newTestHead(t, chunkRange, wlog.CompressionNone, true) + head.opts.EnableOOONativeHistograms.Store(true) t.Cleanup(func() { require.NoError(t, head.Close()) }) ctx := context.Background() @@ -493,6 +494,8 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) { opts := DefaultOptions() opts.OutOfOrderCapMax = 5 opts.OutOfOrderTimeWindow = 120 * time.Minute.Milliseconds() + opts.EnableNativeHistograms = true + opts.EnableOOONativeHistograms = true s1 := labels.FromStrings("l", "v1") minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() } @@ -875,7 +878,7 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) { } resultSamples, err := storage.ExpandSamples(it, nil) require.NoError(t, err) - requireEqualSamples(t, s1.String(), tc.expChunksSamples[i], resultSamples, true) + requireEqualSamples(t, s1.String(), tc.expChunksSamples[i], resultSamples, requireEqualSamplesIgnoreCounterResets) } }) } @@ -902,6 +905,8 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( opts := DefaultOptions() opts.OutOfOrderCapMax = 5 opts.OutOfOrderTimeWindow = 120 * time.Minute.Milliseconds() + opts.EnableNativeHistograms = true + opts.EnableOOONativeHistograms = true s1 := labels.FromStrings("l", "v1") minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() } @@ -1049,7 +1054,7 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( it := 
iterable.Iterator(nil) resultSamples, err := storage.ExpandSamples(it, nil) require.NoError(t, err) - requireEqualSamples(t, s1.String(), tc.expChunksSamples[i], resultSamples, true) + requireEqualSamples(t, s1.String(), tc.expChunksSamples[i], resultSamples, requireEqualSamplesIgnoreCounterResets) } }) } diff --git a/tsdb/ooo_head_test.go b/tsdb/ooo_head_test.go index d3cd5f6016b..b9badfea211 100644 --- a/tsdb/ooo_head_test.go +++ b/tsdb/ooo_head_test.go @@ -28,15 +28,14 @@ import ( const testMaxSize int = 32 // Formulas chosen to make testing easy. -func valEven(pos int) int { return pos*2 + 2 } // s[0]=2, s[1]=4, s[2]=6, ..., s[31]=64 - Predictable pre-existing values -func valOdd(pos int) int { return pos*2 + 1 } // s[0]=1, s[1]=3, s[2]=5, ..., s[31]=63 - New values will interject at chosen position because they sort before the pre-existing vals. - -func samplify(v int) sample { return sample{int64(v), float64(v), nil, nil} } +// Formulas chosen to make testing easy. +func valEven(pos int) int64 { return int64(pos*2 + 2) } // s[0]=2, s[1]=4, s[2]=6, ..., s[31]=64 - Predictable pre-existing values +func valOdd(pos int) int64 { return int64(pos*2 + 1) } // s[0]=1, s[1]=3, s[2]=5, ..., s[31]=63 - New values will interject at chosen position because they sort before the pre-existing vals. -func makeEvenSampleSlice(n int) []sample { +func makeEvenSampleSlice(n int, sampleFunc func(ts int64) sample) []sample { s := make([]sample, n) for i := 0; i < n; i++ { - s[i] = samplify(valEven(i)) + s[i] = sampleFunc(valEven(i)) } return s } @@ -45,8 +44,36 @@ func makeEvenSampleSlice(n int) []sample { // - Number of pre-existing samples anywhere from 0 to testMaxSize-1. // - Insert new sample before first pre-existing samples, after the last, and anywhere in between. // - With a chunk initial capacity of testMaxSize/8 and testMaxSize, which lets us test non-full and full chunks, and chunks that need to expand themselves. -// Note: In all samples used, t always equals v in numeric value. when we talk about 'value' we just refer to a value that will be used for both sample.t and sample.v. 
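The OOO chunk tests that follow are re-parameterized so the same body runs once per sample type (float, integer histogram, float histogram), each scenario supplying its own sample constructor. A stripped-down sketch of that scenario-table pattern, assuming a stand-in sample struct and test body; the real scenarios build histograms with tsdbutil.GenerateTestHistogram and GenerateTestFloatHistogram:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

type sample struct {
	t int64
	f float64
}

// Shared test body, parameterized by how a sample for timestamp ts is built.
func testInsert(t *testing.T, sampleFunc func(ts int64) sample) {
	s := sampleFunc(42)
	require.Equal(t, int64(42), s.t)
}

// One subtest per sample type; the patch adds "integer histogram" and
// "float histogram" entries alongside "float" in the same shape.
func TestInsertScenarios(t *testing.T) {
	scenarios := map[string]func(ts int64) sample{
		"float": func(ts int64) sample { return sample{t: ts, f: float64(ts)} },
	}
	for name, sampleFunc := range scenarios {
		t.Run(name, func(t *testing.T) { testInsert(t, sampleFunc) })
	}
}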
func TestOOOInsert(t *testing.T) { + scenarios := map[string]struct { + sampleFunc func(ts int64) sample + }{ + "float": { + sampleFunc: func(ts int64) sample { + return sample{t: ts, f: float64(ts)} + }, + }, + "integer histogram": { + sampleFunc: func(ts int64) sample { + return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(ts))} + }, + }, + "float histogram": { + sampleFunc: func(ts int64) sample { + return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(ts))} + }, + }, + } + for name, scenario := range scenarios { + t.Run(name, func(t *testing.T) { + testOOOInsert(t, scenario.sampleFunc) + }) + } +} + +func testOOOInsert(t *testing.T, + sampleFunc func(ts int64) sample, +) { for numPreExisting := 0; numPreExisting <= testMaxSize; numPreExisting++ { // For example, if we have numPreExisting 2, then: // chunk.samples indexes filled 0 1 @@ -56,20 +83,21 @@ func TestOOOInsert(t *testing.T) { for insertPos := 0; insertPos <= numPreExisting; insertPos++ { chunk := NewOOOChunk() - chunk.samples = makeEvenSampleSlice(numPreExisting) - newSample := samplify(valOdd(insertPos)) - chunk.Insert(newSample.t, newSample.f, nil, nil) + chunk.samples = make([]sample, numPreExisting) + chunk.samples = makeEvenSampleSlice(numPreExisting, sampleFunc) + newSample := sampleFunc(valOdd(insertPos)) + chunk.Insert(newSample.t, newSample.f, newSample.h, newSample.fh) var expSamples []sample // Our expected new samples slice, will be first the original samples. for i := 0; i < insertPos; i++ { - expSamples = append(expSamples, samplify(valEven(i))) + expSamples = append(expSamples, sampleFunc(valEven(i))) } // Then the new sample. expSamples = append(expSamples, newSample) // Followed by any original samples that were pushed back by the new one. for i := insertPos; i < numPreExisting; i++ { - expSamples = append(expSamples, samplify(valEven(i))) + expSamples = append(expSamples, sampleFunc(valEven(i))) } require.Equal(t, expSamples, chunk.samples, "numPreExisting %d, insertPos %d", numPreExisting, insertPos) @@ -81,17 +109,46 @@ func TestOOOInsert(t *testing.T) { // pre-existing samples, with between 1 and testMaxSize pre-existing samples and // with a chunk initial capacity of testMaxSize/8 and testMaxSize, which lets us test non-full and full chunks, and chunks that need to expand themselves. func TestOOOInsertDuplicate(t *testing.T) { + scenarios := map[string]struct { + sampleFunc func(ts int64) sample + }{ + "float": { + sampleFunc: func(ts int64) sample { + return sample{t: ts, f: float64(ts)} + }, + }, + "integer histogram": { + sampleFunc: func(ts int64) sample { + return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(ts))} + }, + }, + "float histogram": { + sampleFunc: func(ts int64) sample { + return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(ts))} + }, + }, + } + for name, scenario := range scenarios { + t.Run(name, func(t *testing.T) { + testOOOInsertDuplicate(t, scenario.sampleFunc) + }) + } +} + +func testOOOInsertDuplicate(t *testing.T, + sampleFunc func(ts int64) sample, +) { for num := 1; num <= testMaxSize; num++ { for dupPos := 0; dupPos < num; dupPos++ { chunk := NewOOOChunk() - chunk.samples = makeEvenSampleSlice(num) + chunk.samples = makeEvenSampleSlice(num, sampleFunc) dupSample := chunk.samples[dupPos] dupSample.f = 0.123 - ok := chunk.Insert(dupSample.t, dupSample.f, nil, nil) + ok := chunk.Insert(dupSample.t, dupSample.f, dupSample.h, dupSample.fh) - expSamples := makeEvenSampleSlice(num) // We expect no change. 
+ expSamples := makeEvenSampleSlice(num, sampleFunc) // We expect no change. require.False(t, ok) require.Equal(t, expSamples, chunk.samples, "num %d, dupPos %d", num, dupPos) } diff --git a/tsdb/querier.go b/tsdb/querier.go index 912c950329a..b80faf881e8 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -254,6 +254,10 @@ func PostingsForMatchers(ctx context.Context, ix IndexReader, ms ...*labels.Matc return nil, err } its = append(its, allPostings) + case m.Type == labels.MatchRegexp && m.Value == ".*": + // .* regexp matches any string: do nothing. + case m.Type == labels.MatchNotRegexp && m.Value == ".*": + return index.EmptyPostings(), nil case labelMustBeSet[m.Name]: // If this matcher must be non-empty, we can be smarter. matchesEmpty := m.Matches("") @@ -1018,9 +1022,9 @@ func (p *populateWithDelChunkSeriesIterator) populateChunksFromIterable() bool { if newChunk != nil { if !recoded { p.chunksFromIterable = append(p.chunksFromIterable, chunks.Meta{Chunk: currentChunk, MinTime: cmint, MaxTime: cmaxt}) + cmint = t } currentChunk = newChunk - cmint = t } cmaxt = t diff --git a/tsdb/querier_bench_test.go b/tsdb/querier_bench_test.go index 43accc253b2..33dca1284df 100644 --- a/tsdb/querier_bench_test.go +++ b/tsdb/querier_bench_test.go @@ -105,17 +105,17 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) { jFoo := labels.MustNewMatcher(labels.MatchEqual, "j", "foo") jNotFoo := labels.MustNewMatcher(labels.MatchNotEqual, "j", "foo") - iStar := labels.MustNewMatcher(labels.MatchRegexp, "i", "^.*$") - i1Star := labels.MustNewMatcher(labels.MatchRegexp, "i", "^1.*$") - iStar1 := labels.MustNewMatcher(labels.MatchRegexp, "i", "^.*1$") - iStar1Star := labels.MustNewMatcher(labels.MatchRegexp, "i", "^.*1.*$") - iPlus := labels.MustNewMatcher(labels.MatchRegexp, "i", "^.+$") - i1Plus := labels.MustNewMatcher(labels.MatchRegexp, "i", "^1.+$") - iEmptyRe := labels.MustNewMatcher(labels.MatchRegexp, "i", "^$") + iStar := labels.MustNewMatcher(labels.MatchRegexp, "i", ".*") + i1Star := labels.MustNewMatcher(labels.MatchRegexp, "i", "1.*") + iStar1 := labels.MustNewMatcher(labels.MatchRegexp, "i", ".*1") + iStar1Star := labels.MustNewMatcher(labels.MatchRegexp, "i", ".*1.*") + iPlus := labels.MustNewMatcher(labels.MatchRegexp, "i", ".+") + i1Plus := labels.MustNewMatcher(labels.MatchRegexp, "i", "1.+") + iEmptyRe := labels.MustNewMatcher(labels.MatchRegexp, "i", "") iNotEmpty := labels.MustNewMatcher(labels.MatchNotEqual, "i", "") iNot2 := labels.MustNewMatcher(labels.MatchNotEqual, "i", "2"+postingsBenchSuffix) - iNot2Star := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^2.*$") - iNotStar2Star := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^.*2.*$") + iNot2Star := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "2.*") + iNotStar2Star := labels.MustNewMatcher(labels.MatchNotRegexp, "i", ".*2.*") jFooBar := labels.MustNewMatcher(labels.MatchRegexp, "j", "foo|bar") jXXXYYY := labels.MustNewMatcher(labels.MatchRegexp, "j", "XXX|YYY") jXplus := labels.MustNewMatcher(labels.MatchRegexp, "j", "X.+") @@ -186,13 +186,13 @@ func benchmarkLabelValuesWithMatchers(b *testing.B, ir IndexReader) { i1Plus := labels.MustNewMatcher(labels.MatchRegexp, "i", "1.+") i1PostingsBenchSuffix := labels.MustNewMatcher(labels.MatchEqual, "i", "1"+postingsBenchSuffix) iSuffix := labels.MustNewMatcher(labels.MatchRegexp, "i", ".+ddd") - iStar := labels.MustNewMatcher(labels.MatchRegexp, "i", "^.*$") + iStar := labels.MustNewMatcher(labels.MatchRegexp, "i", ".*") jNotFoo := 
labels.MustNewMatcher(labels.MatchNotEqual, "j", "foo") jXXXYYY := labels.MustNewMatcher(labels.MatchRegexp, "j", "XXX|YYY") jXplus := labels.MustNewMatcher(labels.MatchRegexp, "j", "X.+") n1 := labels.MustNewMatcher(labels.MatchEqual, "n", "1"+postingsBenchSuffix) nX := labels.MustNewMatcher(labels.MatchNotEqual, "n", "X"+postingsBenchSuffix) - nPlus := labels.MustNewMatcher(labels.MatchRegexp, "n", "^.+$") + nPlus := labels.MustNewMatcher(labels.MatchRegexp, "n", ".+") ctx := context.Background() @@ -205,12 +205,12 @@ func benchmarkLabelValuesWithMatchers(b *testing.B, ir IndexReader) { {`i with i="1"`, "i", []*labels.Matcher{i1}}, // i has 100k values. {`i with n="1"`, "i", []*labels.Matcher{n1}}, - {`i with n="^.+$"`, "i", []*labels.Matcher{nPlus}}, + {`i with n=".+"`, "i", []*labels.Matcher{nPlus}}, {`i with n="1",j!="foo"`, "i", []*labels.Matcher{n1, jNotFoo}}, {`i with n="1",j=~"X.+"`, "i", []*labels.Matcher{n1, jXplus}}, {`i with n="1",j=~"XXX|YYY"`, "i", []*labels.Matcher{n1, jXXXYYY}}, {`i with n="X",j!="foo"`, "i", []*labels.Matcher{nX, jNotFoo}}, - {`i with n="1",i=~"^.*$",j!="foo"`, "i", []*labels.Matcher{n1, iStar, jNotFoo}}, + {`i with n="1",i=~".*",j!="foo"`, "i", []*labels.Matcher{n1, iStar, jNotFoo}}, // matchers on i itself {`i with i="1aaa...ddd"`, "i", []*labels.Matcher{i1PostingsBenchSuffix}}, {`i with i=~"1.+"`, "i", []*labels.Matcher{i1Plus}}, diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 858b707932e..aca6c845b18 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -2689,6 +2689,7 @@ func TestPostingsForMatchers(t *testing.T) { app.Append(0, labels.FromStrings("n", "1"), 0, 0) app.Append(0, labels.FromStrings("n", "1", "i", "a"), 0, 0) app.Append(0, labels.FromStrings("n", "1", "i", "b"), 0, 0) + app.Append(0, labels.FromStrings("n", "1", "i", "\n"), 0, 0) app.Append(0, labels.FromStrings("n", "2"), 0, 0) app.Append(0, labels.FromStrings("n", "2.5"), 0, 0) require.NoError(t, app.Commit()) @@ -2704,6 +2705,7 @@ func TestPostingsForMatchers(t *testing.T) { labels.FromStrings("n", "1"), labels.FromStrings("n", "1", "i", "a"), labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), }, }, { @@ -2722,6 +2724,7 @@ func TestPostingsForMatchers(t *testing.T) { labels.FromStrings("n", "1"), labels.FromStrings("n", "1", "i", "a"), labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), labels.FromStrings("n", "2"), labels.FromStrings("n", "2.5"), }, @@ -2739,6 +2742,7 @@ func TestPostingsForMatchers(t *testing.T) { exp: []labels.Labels{ labels.FromStrings("n", "1", "i", "a"), labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), }, }, { @@ -2750,6 +2754,7 @@ func TestPostingsForMatchers(t *testing.T) { exp: []labels.Labels{ labels.FromStrings("n", "1"), labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), }, }, { @@ -2757,6 +2762,7 @@ func TestPostingsForMatchers(t *testing.T) { exp: []labels.Labels{ labels.FromStrings("n", "1", "i", "a"), labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), }, }, // Regex. 
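The benchmark and test updates above lean on the new PostingsForMatchers shortcuts from tsdb/querier.go: because Prometheus matchers are anchored, =~".*" selects every series and can be skipped, while !~".*" can never match and yields empty postings; the added series whose i label is a literal newline exercises exactly these shortcuts. A self-contained sketch of that short-circuit with stand-in matcher types (names here are illustrative):

package example

type matchType int

const (
	matchRegexp matchType = iota
	matchNotRegexp
)

type matcher struct {
	typ   matchType
	value string
}

// filterTrivialMatchers drops matchers that cannot narrow the result and
// reports whether any matcher can never match, mirroring the shortcut above:
// =~".*" selects everything (skip it), !~".*" selects nothing (empty result).
func filterTrivialMatchers(ms []matcher) (kept []matcher, empty bool) {
	for _, m := range ms {
		switch {
		case m.typ == matchRegexp && m.value == ".*":
			// Matches any value, including the empty string: no-op.
		case m.typ == matchNotRegexp && m.value == ".*":
			return nil, true
		default:
			kept = append(kept, m)
		}
	}
	return kept, false
}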
@@ -2766,6 +2772,7 @@ func TestPostingsForMatchers(t *testing.T) { labels.FromStrings("n", "1"), labels.FromStrings("n", "1", "i", "a"), labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), }, }, { @@ -2801,6 +2808,7 @@ func TestPostingsForMatchers(t *testing.T) { labels.FromStrings("n", "1"), labels.FromStrings("n", "1", "i", "a"), labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), }, }, { @@ -2808,6 +2816,7 @@ func TestPostingsForMatchers(t *testing.T) { exp: []labels.Labels{ labels.FromStrings("n", "1", "i", "a"), labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), }, }, // Not regex. @@ -2816,6 +2825,7 @@ func TestPostingsForMatchers(t *testing.T) { exp: []labels.Labels{ labels.FromStrings("n", "1", "i", "a"), labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), }, }, { @@ -2849,12 +2859,14 @@ func TestPostingsForMatchers(t *testing.T) { exp: []labels.Labels{ labels.FromStrings("n", "1"), labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), }, }, { matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^a?$")}, exp: []labels.Labels{ labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), }, }, { @@ -2862,6 +2874,7 @@ func TestPostingsForMatchers(t *testing.T) { exp: []labels.Labels{ labels.FromStrings("n", "1", "i", "a"), labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), }, }, { @@ -2895,6 +2908,7 @@ func TestPostingsForMatchers(t *testing.T) { labels.FromStrings("n", "1"), labels.FromStrings("n", "1", "i", "a"), labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), labels.FromStrings("n", "2"), }, }, @@ -2942,6 +2956,57 @@ func TestPostingsForMatchers(t *testing.T) { labels.FromStrings("n", "2.5"), }, }, + // Test shortcut for i=~".*" + { + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "i", ".*")}, + exp: []labels.Labels{ + labels.FromStrings("n", "1"), + labels.FromStrings("n", "1", "i", "a"), + labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), + labels.FromStrings("n", "2"), + labels.FromStrings("n", "2.5"), + }, + }, + // Test shortcut for n=~".*" and i=~"^.*$" + { + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", ".*"), labels.MustNewMatcher(labels.MatchRegexp, "i", "^.*$")}, + exp: []labels.Labels{ + labels.FromStrings("n", "1"), + labels.FromStrings("n", "1", "i", "a"), + labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "1", "i", "\n"), + labels.FromStrings("n", "2"), + labels.FromStrings("n", "2.5"), + }, + }, + // Test shortcut for n=~"^.*$" + { + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", "^.*$"), labels.MustNewMatcher(labels.MatchEqual, "i", "a")}, + exp: []labels.Labels{ + labels.FromStrings("n", "1", "i", "a"), + }, + }, + // Test shortcut for i!~".*" + { + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "i", ".*")}, + exp: []labels.Labels{}, + }, + // Test shortcut for n!~"^.*$", i!~".*". First one triggers empty result. 
+ { + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "n", "^.*$"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", ".*")}, + exp: []labels.Labels{}, + }, + // Test shortcut i!~".*" + { + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", ".*"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", ".*")}, + exp: []labels.Labels{}, + }, + // Test shortcut i!~"^.*$" + { + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^.*$")}, + exp: []labels.Labels{}, + }, } ir, err := h.Index() @@ -3235,7 +3300,7 @@ func (m mockMatcherIndex) LabelValueFor(context.Context, storage.SeriesRef, stri } func (m mockMatcherIndex) LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error) { - return nil, errors.New("label names for for called") + return nil, errors.New("label names for called") } func (m mockMatcherIndex) Postings(context.Context, string, ...string) (index.Postings, error) { @@ -3722,3 +3787,35 @@ func (m mockReaderOfLabels) Series(storage.SeriesRef, *labels.ScratchBuilder, *[ func (m mockReaderOfLabels) Symbols() index.StringIter { panic("Series called") } + +// TestMergeQuerierConcurrentSelectMatchers reproduces the data race bug from +// https://github.com/prometheus/prometheus/issues/14723, when one of the queriers (blockQuerier in this case) +// alters the passed matchers. +func TestMergeQuerierConcurrentSelectMatchers(t *testing.T) { + block, err := OpenBlock(nil, createBlock(t, t.TempDir(), genSeries(1, 1, 0, 1)), nil) + require.NoError(t, err) + defer func() { + require.NoError(t, block.Close()) + }() + p, err := NewBlockQuerier(block, 0, 1) + require.NoError(t, err) + + // A secondary querier is required to enable concurrent select; a blockQuerier is used for simplicity. + s, err := NewBlockQuerier(block, 0, 1) + require.NoError(t, err) + + originalMatchers := []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchRegexp, "baz", ".*"), + labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"), + } + matchers := append([]*labels.Matcher{}, originalMatchers...) + + mergedQuerier := storage.NewMergeQuerier([]storage.Querier{p}, []storage.Querier{s}, storage.ChainedSeriesMerge) + defer func() { + require.NoError(t, mergedQuerier.Close()) + }() + + mergedQuerier.Select(context.Background(), false, nil, matchers...) + + require.Equal(t, originalMatchers, matchers) +} diff --git a/tsdb/record/record_test.go b/tsdb/record/record_test.go index da7748e1873..f3a657aecbc 100644 --- a/tsdb/record/record_test.go +++ b/tsdb/record/record_test.go @@ -166,7 +166,7 @@ func TestRecord_EncodeDecode(t *testing.T) { require.NoError(t, err) require.Equal(t, floatHistograms, decFloatHistograms) - // Gauge ingeger histograms. + // Gauge integer histograms. for i := range histograms { histograms[i].H.CounterResetHint = histogram.GaugeType } diff --git a/tsdb/repair.go b/tsdb/repair.go index 9d2c5738d17..8bdc645b5e3 100644 --- a/tsdb/repair.go +++ b/tsdb/repair.go @@ -17,19 +17,17 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "os" "path/filepath" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" ) // repairBadIndexVersion repairs an issue in index and meta.json persistence introduced in // commit 129773b41a565fde5156301e37f9a87158030443. 
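TestMergeQuerierConcurrentSelectMatchers above guards against a querier mutating the matcher slice it was handed, the data race reported in issue 14723. A short sketch of the defensive-copy pattern that avoids it; sortMatchersCopy is an illustrative name, not a function from the patch:

package example

import (
	"sort"

	"github.com/prometheus/prometheus/model/labels"
)

// sortMatchersCopy reorders matchers on its own copy so callers that share
// the original slice across concurrent Selects are unaffected.
func sortMatchersCopy(matchers []*labels.Matcher) []*labels.Matcher {
	ms := make([]*labels.Matcher, len(matchers))
	copy(ms, matchers)
	sort.Slice(ms, func(i, j int) bool { return ms[i].Name < ms[j].Name })
	return ms
}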
-func repairBadIndexVersion(logger log.Logger, dir string) error { +func repairBadIndexVersion(logger *slog.Logger, dir string) error { // All blocks written by Prometheus 2.1 with a meta.json version of 2 are affected. // We must actually set the index file version to 2 and revert the meta.json version back to 1. dirs, err := blockDirs(dir) @@ -41,7 +39,7 @@ func repairBadIndexVersion(logger log.Logger, dir string) error { defer func() { for _, tmp := range tmpFiles { if err := os.RemoveAll(tmp); err != nil { - level.Error(logger).Log("msg", "remove tmp file", "err", err.Error()) + logger.Error("remove tmp file", "err", err.Error()) } } }() @@ -49,20 +47,20 @@ func repairBadIndexVersion(logger log.Logger, dir string) error { for _, d := range dirs { meta, err := readBogusMetaFile(d) if err != nil { - level.Error(logger).Log("msg", "failed to read meta.json for a block during repair process; skipping", "dir", d, "err", err) + logger.Error("failed to read meta.json for a block during repair process; skipping", "dir", d, "err", err) continue } if meta.Version == metaVersion1 { - level.Info(logger).Log( - "msg", "Found healthy block", + logger.Info( + "Found healthy block", "mint", meta.MinTime, "maxt", meta.MaxTime, "ulid", meta.ULID, ) continue } - level.Info(logger).Log( - "msg", "Fixing broken block", + logger.Info( + "Fixing broken block", "mint", meta.MinTime, "maxt", meta.MaxTime, "ulid", meta.ULID, diff --git a/tsdb/testutil.go b/tsdb/testutil.go index 9730e471327..03587f4e2c3 100644 --- a/tsdb/testutil.go +++ b/tsdb/testutil.go @@ -16,6 +16,8 @@ package tsdb import ( "testing" + "github.com/prometheus/prometheus/tsdb/tsdbutil" + prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" "github.com/stretchr/testify/require" @@ -27,7 +29,11 @@ import ( ) const ( - float = "float" + float = "float" + intHistogram = "integer histogram" + floatHistogram = "float histogram" + gaugeIntHistogram = "gauge int histogram" + gaugeFloatHistogram = "gauge float histogram" ) type testValue struct { @@ -42,7 +48,6 @@ type sampleTypeScenario struct { sampleFunc func(ts, value int64) sample } -// TODO: native histogram sample types will be added as part of out-of-order native histogram support; see #11220. 
var sampleTypeScenarios = map[string]sampleTypeScenario{ float: { sampleType: sampleMetricTypeFloat, @@ -55,50 +60,50 @@ var sampleTypeScenarios = map[string]sampleTypeScenario{ return sample{t: ts, f: float64(value)} }, }, - // intHistogram: { - // sampleType: sampleMetricTypeHistogram, - // appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { - // s := sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(value))} - // ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil) - // return ref, s, err - // }, - // sampleFunc: func(ts, value int64) sample { - // return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(value))} - // }, - // }, - // floatHistogram: { - // sampleType: sampleMetricTypeHistogram, - // appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { - // s := sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(value))} - // ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh) - // return ref, s, err - // }, - // sampleFunc: func(ts, value int64) sample { - // return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(value))} - // }, - // }, - // gaugeIntHistogram: { - // sampleType: sampleMetricTypeHistogram, - // appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { - // s := sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(int(value))} - // ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil) - // return ref, s, err - // }, - // sampleFunc: func(ts, value int64) sample { - // return sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(int(value))} - // }, - // }, - // gaugeFloatHistogram: { - // sampleType: sampleMetricTypeHistogram, - // appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { - // s := sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(int(value))} - // ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh) - // return ref, s, err - // }, - // sampleFunc: func(ts, value int64) sample { - // return sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(int(value))} - // }, - // }, + intHistogram: { + sampleType: sampleMetricTypeHistogram, + appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { + s := sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(value))} + ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil) + return ref, s, err + }, + sampleFunc: func(ts, value int64) sample { + return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(value))} + }, + }, + floatHistogram: { + sampleType: sampleMetricTypeHistogram, + appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { + s := sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(value))} + ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh) + return ref, s, err + }, + sampleFunc: func(ts, value int64) sample { + return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(value))} + }, + }, + gaugeIntHistogram: { + sampleType: sampleMetricTypeHistogram, + appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { + s := sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(int(value))} + ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil) + return ref, s, err + }, + sampleFunc: 
func(ts, value int64) sample { + return sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(int(value))} + }, + }, + gaugeFloatHistogram: { + sampleType: sampleMetricTypeHistogram, + appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { + s := sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(int(value))} + ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh) + return ref, s, err + }, + sampleFunc: func(ts, value int64) sample { + return sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(int(value))} + }, + }, } // requireEqualSeries checks that the actual series are equal to the expected ones. It ignores the counter reset hints for histograms. @@ -106,7 +111,7 @@ func requireEqualSeries(t *testing.T, expected, actual map[string][]chunks.Sampl for name, expectedItem := range expected { actualItem, ok := actual[name] require.True(t, ok, "Expected series %s not found", name) - requireEqualSamples(t, name, expectedItem, actualItem, ignoreCounterResets) + requireEqualSamples(t, name, expectedItem, actualItem, requireEqualSamplesIgnoreCounterResets) } for name := range actual { _, ok := expected[name] @@ -121,7 +126,28 @@ func requireEqualOOOSamples(t *testing.T, expectedSamples int, db *DB) { "number of ooo appended samples mismatch") } -func requireEqualSamples(t *testing.T, name string, expected, actual []chunks.Sample, ignoreCounterResets bool) { +type requireEqualSamplesOption int + +const ( + requireEqualSamplesNoOption requireEqualSamplesOption = iota + requireEqualSamplesIgnoreCounterResets + requireEqualSamplesInUseBucketCompare +) + +func requireEqualSamples(t *testing.T, name string, expected, actual []chunks.Sample, options ...requireEqualSamplesOption) { + var ( + ignoreCounterResets bool + inUseBucketCompare bool + ) + for _, option := range options { + switch option { + case requireEqualSamplesIgnoreCounterResets: + ignoreCounterResets = true + case requireEqualSamplesInUseBucketCompare: + inUseBucketCompare = true + } + } + require.Equal(t, len(expected), len(actual), "Length not equal to expected for %s", name) for i, s := range expected { expectedSample := s @@ -139,6 +165,10 @@ func requireEqualSamples(t *testing.T, name string, expected, actual []chunks.Sa } else { require.Equal(t, expectedHist.CounterResetHint, actualHist.CounterResetHint, "Sample header doesn't match for %s[%d] at ts %d, expected: %s, actual: %s", name, i, expectedSample.T(), counterResetAsString(expectedHist.CounterResetHint), counterResetAsString(actualHist.CounterResetHint)) } + if inUseBucketCompare { + expectedSample.H().Compact(0) + actualSample.H().Compact(0) + } require.Equal(t, expectedHist, actualHist, "Sample doesn't match for %s[%d] at ts %d", name, i, expectedSample.T()) } case s.FH() != nil: @@ -151,6 +181,10 @@ func requireEqualSamples(t *testing.T, name string, expected, actual []chunks.Sa } else { require.Equal(t, expectedHist.CounterResetHint, actualHist.CounterResetHint, "Sample header doesn't match for %s[%d] at ts %d, expected: %s, actual: %s", name, i, expectedSample.T(), counterResetAsString(expectedHist.CounterResetHint), counterResetAsString(actualHist.CounterResetHint)) } + if inUseBucketCompare { + expectedSample.FH().Compact(0) + actualSample.FH().Compact(0) + } require.Equal(t, expectedHist, actualHist, "Sample doesn't match for %s[%d] at ts %d", name, i, expectedSample.T()) } default: diff --git a/tsdb/tombstones/tombstones.go b/tsdb/tombstones/tombstones.go index 4cea5005dbc..dcba298f3bb 
100644 --- a/tsdb/tombstones/tombstones.go +++ b/tsdb/tombstones/tombstones.go @@ -19,15 +19,13 @@ import ( "fmt" "hash" "hash/crc32" + "log/slog" "math" "os" "path/filepath" "sort" "sync" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/encoding" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" @@ -76,7 +74,7 @@ type Reader interface { Close() error } -func WriteFile(logger log.Logger, dir string, tr Reader) (int64, error) { +func WriteFile(logger *slog.Logger, dir string, tr Reader) (int64, error) { path := filepath.Join(dir, TombstonesFilename) tmp := path + ".tmp" hash := newCRC32() @@ -89,11 +87,11 @@ func WriteFile(logger log.Logger, dir string, tr Reader) (int64, error) { defer func() { if f != nil { if err := f.Close(); err != nil { - level.Error(logger).Log("msg", "close tmp file", "err", err.Error()) + logger.Error("close tmp file", "err", err.Error()) } } if err := os.RemoveAll(tmp); err != nil { - level.Error(logger).Log("msg", "remove tmp file", "err", err.Error()) + logger.Error("remove tmp file", "err", err.Error()) } }() diff --git a/tsdb/tombstones/tombstones_test.go b/tsdb/tombstones/tombstones_test.go index 36c9f1c1e3f..cbf686e4bb5 100644 --- a/tsdb/tombstones/tombstones_test.go +++ b/tsdb/tombstones/tombstones_test.go @@ -20,10 +20,11 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/stretchr/testify/require" "go.uber.org/goleak" + "github.com/prometheus/common/promslog" + "github.com/prometheus/prometheus/storage" ) @@ -50,7 +51,7 @@ func TestWriteAndReadbackTombstones(t *testing.T) { stones.AddInterval(storage.SeriesRef(ref), dranges...) } - _, err := WriteFile(log.NewNopLogger(), tmpdir, stones) + _, err := WriteFile(promslog.NewNopLogger(), tmpdir, stones) require.NoError(t, err) restr, _, err := ReadTombstones(tmpdir) diff --git a/tsdb/tsdbblockutil.go b/tsdb/tsdbblockutil.go index f7b27c2e08e..b49757223f1 100644 --- a/tsdb/tsdbblockutil.go +++ b/tsdb/tsdbblockutil.go @@ -16,10 +16,9 @@ package tsdb import ( "context" "fmt" + "log/slog" "path/filepath" - "github.com/go-kit/log" - "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" ) @@ -27,7 +26,7 @@ import ( var ErrInvalidTimes = fmt.Errorf("max time is lesser than min time") // CreateBlock creates a chunkrange block from the samples passed to it, and writes it to disk. 
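WriteFile above keeps the tombstones write-to-temp-file flow while switching its cleanup logging to slog: everything is written to "<path>.tmp", and the deferred block logs, rather than returns, failures to close or remove the temporary file. A self-contained sketch of that pattern under stated assumptions; writeAtomically is a hypothetical helper, and the final rename stands in for the real code's hashing, encoding, and replace steps:

package example

import (
	"log/slog"
	"os"
	"path/filepath"
)

func writeAtomically(logger *slog.Logger, dir, name string, data []byte) error {
	path := filepath.Join(dir, name)
	tmp := path + ".tmp"

	f, err := os.Create(tmp)
	if err != nil {
		return err
	}
	defer func() {
		// Cleanup failures are logged, not returned, as in WriteFile above.
		if f != nil {
			if err := f.Close(); err != nil {
				logger.Error("close tmp file", "err", err.Error())
			}
		}
		if err := os.RemoveAll(tmp); err != nil {
			logger.Error("remove tmp file", "err", err.Error())
		}
	}()

	if _, err := f.Write(data); err != nil {
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	f = nil // already closed; keep the deferred cleanup from closing twice
	return os.Rename(tmp, path)
}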
-func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger log.Logger) (string, error) { +func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger *slog.Logger) (string, error) { if chunkRange == 0 { chunkRange = DefaultBlockDuration } @@ -41,7 +40,7 @@ func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger l } defer func() { if err := w.Close(); err != nil { - logger.Log("err closing blockwriter", err.Error()) + logger.Error("err closing blockwriter", "err", err.Error()) } }() diff --git a/tsdb/tsdbutil/dir_locker.go b/tsdb/tsdbutil/dir_locker.go index fa939879cad..4b69e1f9d61 100644 --- a/tsdb/tsdbutil/dir_locker.go +++ b/tsdb/tsdbutil/dir_locker.go @@ -16,11 +16,10 @@ package tsdbutil import ( "errors" "fmt" + "log/slog" "os" "path/filepath" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" @@ -34,7 +33,7 @@ const ( ) type DirLocker struct { - logger log.Logger + logger *slog.Logger createdCleanly prometheus.Gauge @@ -43,7 +42,7 @@ type DirLocker struct { } // NewDirLocker creates a DirLocker that can obtain an exclusive lock on dir. -func NewDirLocker(dir, subsystem string, l log.Logger, r prometheus.Registerer) (*DirLocker, error) { +func NewDirLocker(dir, subsystem string, l *slog.Logger, r prometheus.Registerer) (*DirLocker, error) { lock := &DirLocker{ logger: l, createdCleanly: prometheus.NewGauge(prometheus.GaugeOpts{ @@ -74,7 +73,7 @@ func (l *DirLocker) Lock() error { } if _, err := os.Stat(l.path); err == nil { - level.Warn(l.logger).Log("msg", "A lockfile from a previous execution already existed. It was replaced", "file", l.path) + l.logger.Warn("A lockfile from a previous execution already existed. 
It was replaced", "file", l.path) l.createdCleanly.Set(lockfileReplaced) } else { diff --git a/tsdb/tsdbutil/dir_locker_test.go b/tsdb/tsdbutil/dir_locker_test.go index fc7d905b2d7..65e27616920 100644 --- a/tsdb/tsdbutil/dir_locker_test.go +++ b/tsdb/tsdbutil/dir_locker_test.go @@ -16,15 +16,16 @@ package tsdbutil import ( "testing" - "github.com/go-kit/log" "github.com/stretchr/testify/require" + "github.com/prometheus/common/promslog" + "github.com/prometheus/prometheus/util/testutil" ) func TestLockfile(t *testing.T) { TestDirLockerUsage(t, func(t *testing.T, data string, createLock bool) (*DirLocker, testutil.Closer) { - locker, err := NewDirLocker(data, "tsdbutil", log.NewNopLogger(), nil) + locker, err := NewDirLocker(data, "tsdbutil", promslog.NewNopLogger(), nil) require.NoError(t, err) if createLock { diff --git a/tsdb/tsdbutil/dir_locker_testutil.go b/tsdb/tsdbutil/dir_locker_testutil.go index a4cf5abd68c..7228dbafed6 100644 --- a/tsdb/tsdbutil/dir_locker_testutil.go +++ b/tsdb/tsdbutil/dir_locker_testutil.go @@ -18,8 +18,8 @@ import ( "os" "testing" - "github.com/go-kit/log" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/util/testutil" @@ -68,7 +68,7 @@ func TestDirLockerUsage(t *testing.T, open func(t *testing.T, data string, creat // Test preconditions (file already exists + lockfile option) if c.fileAlreadyExists { - tmpLocker, err := NewDirLocker(tmpdir, "tsdb", log.NewNopLogger(), nil) + tmpLocker, err := NewDirLocker(tmpdir, "tsdb", promslog.NewNopLogger(), nil) require.NoError(t, err) err = os.WriteFile(tmpLocker.path, []byte{}, 0o644) require.NoError(t, err) diff --git a/tsdb/wlog/checkpoint.go b/tsdb/wlog/checkpoint.go index a16cd5fc749..58e11c770e0 100644 --- a/tsdb/wlog/checkpoint.go +++ b/tsdb/wlog/checkpoint.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" "io" + "log/slog" "math" "os" "path/filepath" @@ -25,9 +26,6 @@ import ( "strconv" "strings" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunks" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" @@ -94,11 +92,11 @@ const checkpointPrefix = "checkpoint." // segmented format as the original WAL itself. // This makes it easy to read it through the WAL package and concatenate // it with the original WAL. 
-func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.HeadSeriesRef) bool, mint int64) (*CheckpointStats, error) { +func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.HeadSeriesRef) bool, mint int64) (*CheckpointStats, error) { stats := &CheckpointStats{} var sgmReader io.ReadCloser - level.Info(logger).Log("msg", "Creating checkpoint", "from_segment", from, "to_segment", to, "mint", mint) + logger.Info("Creating checkpoint", "from_segment", from, "to_segment", to, "mint", mint) { var sgmRange []SegmentRange diff --git a/tsdb/wlog/checkpoint_test.go b/tsdb/wlog/checkpoint_test.go index a9786454de7..8ee193f5ac4 100644 --- a/tsdb/wlog/checkpoint_test.go +++ b/tsdb/wlog/checkpoint_test.go @@ -23,9 +23,10 @@ import ( "strings" "testing" - "github.com/go-kit/log" "github.com/stretchr/testify/require" + "github.com/prometheus/common/promslog" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunks" @@ -244,7 +245,7 @@ func TestCheckpoint(t *testing.T) { } require.NoError(t, w.Close()) - stats, err := Checkpoint(log.NewNopLogger(), w, 100, 106, func(x chunks.HeadSeriesRef) bool { + stats, err := Checkpoint(promslog.NewNopLogger(), w, 100, 106, func(x chunks.HeadSeriesRef) bool { return x%2 == 0 }, last/2) require.NoError(t, err) @@ -354,7 +355,7 @@ func TestCheckpointNoTmpFolderAfterError(t *testing.T) { require.NoError(t, f.Close()) // Run the checkpoint and since the wlog contains corrupt data this should return an error. - _, err = Checkpoint(log.NewNopLogger(), w, 0, 1, nil, 0) + _, err = Checkpoint(promslog.NewNopLogger(), w, 0, 1, nil, 0) require.Error(t, err) // Walk the wlog dir to make sure there are no tmp folder left behind after the error. diff --git a/tsdb/wlog/live_reader.go b/tsdb/wlog/live_reader.go index 6eaef5f3960..a017d362d15 100644 --- a/tsdb/wlog/live_reader.go +++ b/tsdb/wlog/live_reader.go @@ -20,9 +20,8 @@ import ( "fmt" "hash/crc32" "io" + "log/slog" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/golang/snappy" "github.com/klauspost/compress/zstd" "github.com/prometheus/client_golang/prometheus" @@ -51,7 +50,7 @@ func NewLiveReaderMetrics(reg prometheus.Registerer) *LiveReaderMetrics { } // NewLiveReader returns a new live reader. -func NewLiveReader(logger log.Logger, metrics *LiveReaderMetrics, r io.Reader) *LiveReader { +func NewLiveReader(logger *slog.Logger, metrics *LiveReaderMetrics, r io.Reader) *LiveReader { // Calling zstd.NewReader with a nil io.Reader and no options cannot return an error. zstdReader, _ := zstd.NewReader(nil) @@ -73,7 +72,7 @@ func NewLiveReader(logger log.Logger, metrics *LiveReaderMetrics, r io.Reader) * // that are still in the process of being written, and returns records as soon // as they can be read. 
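The test files in these hunks swap go-kit's log.NewNopLogger() for promslog.NewNopLogger(), a no-op *slog.Logger, wherever the code under test now takes *slog.Logger. A standard-library-only sketch of an equivalent no-op logger; newNopLogger is an illustrative helper, not part of the patch:

package example

import (
	"io"
	"log/slog"
	"testing"
)

// newNopLogger returns a *slog.Logger whose output is discarded, playing the
// role promslog.NewNopLogger() fills in the tests above.
func newNopLogger() *slog.Logger {
	return slog.New(slog.NewTextHandler(io.Discard, nil))
}

func TestSomethingQuietly(t *testing.T) {
	logger := newNopLogger()
	logger.Info("this goes nowhere", "key", "value")
}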
type LiveReader struct { - logger log.Logger + logger *slog.Logger rdr io.Reader err error rec []byte @@ -311,7 +310,7 @@ func (r *LiveReader) readRecord() ([]byte, int, error) { return nil, 0, fmt.Errorf("record would overflow current page: %d > %d", r.readIndex+recordHeaderSize+length, pageSize) } r.metrics.readerCorruptionErrors.WithLabelValues("record_span_page").Inc() - level.Warn(r.logger).Log("msg", "Record spans page boundaries", "start", r.readIndex, "end", recordHeaderSize+length, "pageSize", pageSize) + r.logger.Warn("Record spans page boundaries", "start", r.readIndex, "end", recordHeaderSize+length, "pageSize", pageSize) } if recordHeaderSize+length > pageSize { return nil, 0, fmt.Errorf("record length greater than a single page: %d > %d", recordHeaderSize+length, pageSize) diff --git a/tsdb/wlog/reader_test.go b/tsdb/wlog/reader_test.go index 484eff3664a..2ac63cbf150 100644 --- a/tsdb/wlog/reader_test.go +++ b/tsdb/wlog/reader_test.go @@ -29,11 +29,11 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/stretchr/testify/require" + "github.com/prometheus/common/promslog" + tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" - "github.com/prometheus/prometheus/util/testutil" ) type reader interface { @@ -53,7 +53,7 @@ var readerConstructors = map[string]func(io.Reader) reader{ return NewReader(r) }, "LiveReader": func(r io.Reader) reader { - lr := NewLiveReader(log.NewNopLogger(), NewLiveReaderMetrics(nil), r) + lr := NewLiveReader(promslog.NewNopLogger(), NewLiveReaderMetrics(nil), r) lr.eofNonErr = true return lr }, @@ -196,7 +196,7 @@ func TestReader(t *testing.T) { } func TestReader_Live(t *testing.T) { - logger := testutil.NewLogger(t) + logger := promslog.NewNopLogger() for i := range testReaderCases { t.Run(strconv.Itoa(i), func(t *testing.T) { @@ -353,7 +353,7 @@ func TestReaderFuzz(t *testing.T) { } func TestReaderFuzz_Live(t *testing.T) { - logger := testutil.NewLogger(t) + logger := promslog.NewNopLogger() for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { dir := t.TempDir() @@ -441,7 +441,7 @@ func TestReaderFuzz_Live(t *testing.T) { func TestLiveReaderCorrupt_ShortFile(t *testing.T) { // Write a corrupt WAL segment, there is one record of pageSize in length, // but the segment is only half written. - logger := testutil.NewLogger(t) + logger := promslog.NewNopLogger() dir := t.TempDir() w, err := NewSize(nil, nil, dir, pageSize, CompressionNone) @@ -481,7 +481,7 @@ func TestLiveReaderCorrupt_ShortFile(t *testing.T) { func TestLiveReaderCorrupt_RecordTooLongAndShort(t *testing.T) { // Write a corrupt WAL segment, when record len > page size. 
- logger := testutil.NewLogger(t) + logger := promslog.NewNopLogger() dir := t.TempDir() w, err := NewSize(nil, nil, dir, pageSize*2, CompressionNone) diff --git a/tsdb/wlog/watcher.go b/tsdb/wlog/watcher.go index ac5041e87b9..d68ef2accb8 100644 --- a/tsdb/wlog/watcher.go +++ b/tsdb/wlog/watcher.go @@ -17,6 +17,7 @@ import ( "errors" "fmt" "io" + "log/slog" "math" "os" "path/filepath" @@ -24,9 +25,8 @@ import ( "strings" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" @@ -84,7 +84,7 @@ type WatcherMetrics struct { type Watcher struct { name string writer WriteTo - logger log.Logger + logger *slog.Logger walDir string lastCheckpoint string sendExemplars bool @@ -172,9 +172,9 @@ func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics { } // NewWatcher creates a new WAL watcher for a given WriteTo. -func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger log.Logger, name string, writer WriteTo, dir string, sendExemplars, sendHistograms, sendMetadata bool) *Watcher { +func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger *slog.Logger, name string, writer WriteTo, dir string, sendExemplars, sendHistograms, sendMetadata bool) *Watcher { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } return &Watcher{ logger: logger, @@ -222,7 +222,7 @@ func (w *Watcher) setMetrics() { // Start the Watcher. func (w *Watcher) Start() { w.setMetrics() - level.Info(w.logger).Log("msg", "Starting WAL watcher", "queue", w.name) + w.logger.Info("Starting WAL watcher", "queue", w.name) go w.loop() } @@ -241,7 +241,7 @@ func (w *Watcher) Stop() { w.metrics.currentSegment.DeleteLabelValues(w.name) } - level.Info(w.logger).Log("msg", "WAL watcher stopped", "queue", w.name) + w.logger.Info("WAL watcher stopped", "queue", w.name) } func (w *Watcher) loop() { @@ -251,7 +251,7 @@ func (w *Watcher) loop() { for !isClosed(w.quit) { w.SetStartTime(time.Now()) if err := w.Run(); err != nil { - level.Error(w.logger).Log("msg", "error tailing WAL", "err", err) + w.logger.Error("error tailing WAL", "err", err) } select { @@ -274,7 +274,7 @@ func (w *Watcher) Run() error { // Run will be called again if there was a failure to read the WAL. w.sendSamples = false - level.Info(w.logger).Log("msg", "Replaying WAL", "queue", w.name) + w.logger.Info("Replaying WAL", "queue", w.name) // Backfill from the checkpoint first if it exists. lastCheckpoint, checkpointIndex, err := LastCheckpoint(w.walDir) @@ -294,13 +294,13 @@ func (w *Watcher) Run() error { return err } - level.Debug(w.logger).Log("msg", "Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment, "lastSegment", lastSegment) + w.logger.Debug("Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment, "lastSegment", lastSegment) for !isClosed(w.quit) { w.currentSegmentMetric.Set(float64(currentSegment)) // On start, after reading the existing WAL for series records, we have a pointer to what is the latest segment. // On subsequent calls to this function, currentSegment will have been incremented and we should open that segment. 
- level.Debug(w.logger).Log("msg", "Processing segment", "currentSegment", currentSegment) + w.logger.Debug("Processing segment", "currentSegment", currentSegment) if err := w.watch(currentSegment, currentSegment >= lastSegment); err != nil && !errors.Is(err, ErrIgnorable) { return err } @@ -338,9 +338,9 @@ func (w *Watcher) readAndHandleError(r *LiveReader, segmentNum int, tail bool, s // Ignore all errors reading to end of segment whilst replaying the WAL. if !tail { if err != nil && !errors.Is(err, io.EOF) { - level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err) + w.logger.Warn("Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err) } else if r.Offset() != size { - level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", r.Offset(), "size", size) + w.logger.Warn("Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", r.Offset(), "size", size) } return ErrIgnorable } @@ -403,7 +403,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { <-gcSem }() if err := w.garbageCollectSeries(segmentNum); err != nil { - level.Warn(w.logger).Log("msg", "Error process checkpoint", "err", err) + w.logger.Warn("Error process checkpoint", "err", err) } }() default: @@ -424,7 +424,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { // we haven't read due to a notification in quite some time, try reading anyways case <-readTicker.C: - level.Debug(w.logger).Log("msg", "Watcher is reading the WAL due to timeout, haven't received any write notifications recently", "timeout", readTimeout) + w.logger.Debug("Watcher is reading the WAL due to timeout, haven't received any write notifications recently", "timeout", readTimeout) err := w.readAndHandleError(reader, segmentNum, tail, size) if err != nil { return err @@ -460,11 +460,11 @@ func (w *Watcher) garbageCollectSeries(segmentNum int) error { } if index >= segmentNum { - level.Debug(w.logger).Log("msg", "Current segment is behind the checkpoint, skipping reading of checkpoint", "current", fmt.Sprintf("%08d", segmentNum), "checkpoint", dir) + w.logger.Debug("Current segment is behind the checkpoint, skipping reading of checkpoint", "current", fmt.Sprintf("%08d", segmentNum), "checkpoint", dir) return nil } - level.Debug(w.logger).Log("msg", "New checkpoint detected", "new", dir, "currentSegment", segmentNum) + w.logger.Debug("New checkpoint detected", "new", dir, "currentSegment", segmentNum) if err = w.readCheckpoint(dir, (*Watcher).readSegmentForGC); err != nil { return fmt.Errorf("readCheckpoint: %w", err) @@ -519,7 +519,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { if !w.sendSamples { w.sendSamples = true duration := time.Since(w.startTime) - level.Info(w.logger).Log("msg", "Done replaying WAL", "duration", duration) + w.logger.Info("Done replaying WAL", "duration", duration) } samplesToSend = append(samplesToSend, s) } @@ -564,7 +564,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { if !w.sendSamples { w.sendSamples = true duration := time.Since(w.startTime) - level.Info(w.logger).Log("msg", "Done replaying WAL", "duration", duration) + w.logger.Info("Done replaying WAL", "duration", duration) } histogramsToSend = append(histogramsToSend, h) } @@ -592,7 +592,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) 
error { if !w.sendSamples { w.sendSamples = true duration := time.Since(w.startTime) - level.Info(w.logger).Log("msg", "Done replaying WAL", "duration", duration) + w.logger.Info("Done replaying WAL", "duration", duration) } floatHistogramsToSend = append(floatHistogramsToSend, fh) } @@ -670,7 +670,7 @@ type segmentReadFn func(w *Watcher, r *LiveReader, segmentNum int, tail bool) er // Read all the series records from a Checkpoint directory. func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) error { - level.Debug(w.logger).Log("msg", "Reading checkpoint", "dir", checkpointDir) + w.logger.Debug("Reading checkpoint", "dir", checkpointDir) index, err := checkpointNum(checkpointDir) if err != nil { return fmt.Errorf("checkpointNum: %w", err) @@ -704,7 +704,7 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err } } - level.Debug(w.logger).Log("msg", "Read series references from checkpoint", "checkpoint", checkpointDir) + w.logger.Debug("Read series references from checkpoint", "checkpoint", checkpointDir) return nil } diff --git a/tsdb/wlog/watcher_test.go b/tsdb/wlog/watcher_test.go index dc0314e8c91..68c2c5afdac 100644 --- a/tsdb/wlog/watcher_test.go +++ b/tsdb/wlog/watcher_test.go @@ -22,9 +22,10 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -52,6 +53,13 @@ func retry(t *testing.T, interval time.Duration, n int, f func() bool) { t.Logf("function returned false") } +// Overwrite readTimeout defined in watcher.go. +func overwriteReadTimeout(t *testing.T, val time.Duration) { + initialVal := readTimeout + readTimeout = val + t.Cleanup(func() { readTimeout = initialVal }) +} + type writeToMock struct { samplesAppended int exemplarsAppended int @@ -302,7 +310,7 @@ func TestReadToEndNoCheckpoint(t *testing.T) { } } require.NoError(t, w.Log(recs...)) - readTimeout = time.Second + overwriteReadTimeout(t, time.Second) _, _, err = Segments(w.Dir()) require.NoError(t, err) @@ -367,7 +375,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) { } } - Checkpoint(log.NewNopLogger(), w, 0, 1, func(x chunks.HeadSeriesRef) bool { return true }, 0) + Checkpoint(promslog.NewNopLogger(), w, 0, 1, func(x chunks.HeadSeriesRef) bool { return true }, 0) w.Truncate(1) // Write more records after checkpointing. 
@@ -394,7 +402,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) { _, _, err = Segments(w.Dir()) require.NoError(t, err) - readTimeout = time.Second + overwriteReadTimeout(t, time.Second) wt := newWriteToMock(0) watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false) go watcher.Start() @@ -458,7 +466,7 @@ func TestReadCheckpoint(t *testing.T) { } _, err = w.NextSegmentSync() require.NoError(t, err) - _, err = Checkpoint(log.NewNopLogger(), w, 30, 31, func(x chunks.HeadSeriesRef) bool { return true }, 0) + _, err = Checkpoint(promslog.NewNopLogger(), w, 30, 31, func(x chunks.HeadSeriesRef) bool { return true }, 0) require.NoError(t, err) require.NoError(t, w.Truncate(32)) @@ -607,7 +615,7 @@ func TestCheckpointSeriesReset(t *testing.T) { _, _, err = Segments(w.Dir()) require.NoError(t, err) - readTimeout = time.Second + overwriteReadTimeout(t, time.Second) wt := newWriteToMock(0) watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false) watcher.MaxSegment = -1 @@ -621,7 +629,7 @@ func TestCheckpointSeriesReset(t *testing.T) { return wt.checkNumSeries() == seriesCount }, 10*time.Second, 1*time.Second) - _, err = Checkpoint(log.NewNopLogger(), w, 2, 4, func(x chunks.HeadSeriesRef) bool { return true }, 0) + _, err = Checkpoint(promslog.NewNopLogger(), w, 2, 4, func(x chunks.HeadSeriesRef) bool { return true }, 0) require.NoError(t, err) err = w.Truncate(5) @@ -742,9 +750,6 @@ func TestRun_AvoidNotifyWhenBehind(t *testing.T) { const seriesCount = 10 const samplesCount = 50 - // This test can take longer than intended to finish in cloud CI. - readTimeout := 10 * time.Second - for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { t.Run(string(compress), func(t *testing.T) { dir := t.TempDir() @@ -755,36 +760,50 @@ func TestRun_AvoidNotifyWhenBehind(t *testing.T) { w, err := NewSize(nil, nil, wdir, segmentSize, compress) require.NoError(t, err) - var wg sync.WaitGroup - // Generate one segment initially to ensure that watcher.Run() finds at least one segment on disk. + // Write to 00000000, the watcher will read series from it. require.NoError(t, generateWALRecords(w, 0, seriesCount, samplesCount)) - w.NextSegment() // Force creation of the next segment - wg.Add(1) - go func() { - defer wg.Done() - for i := 1; i < segmentsToWrite; i++ { - require.NoError(t, generateWALRecords(w, i, seriesCount, samplesCount)) - w.NextSegment() - } - }() + // Create 00000001, the watcher will tail it once started. + w.NextSegment() + // Set up the watcher and run it in the background. wt := newWriteToMock(time.Millisecond) watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false) + watcher.setMetrics() watcher.MaxSegment = segmentsToRead - watcher.setMetrics() - startTime := time.Now() - err = watcher.Run() - wg.Wait() - require.Less(t, time.Since(startTime), readTimeout) + var g errgroup.Group + g.Go(func() error { + startTime := time.Now() + err = watcher.Run() + if err != nil { + return err + } + // If the watcher was to wait for readTicker to read every new segment, it would need readTimeout * segmentsToRead. + d := time.Since(startTime) + if d > readTimeout { + return fmt.Errorf("watcher ran for %s, it shouldn't rely on readTicker=%s to read the new segments", d, readTimeout) + } + return nil + }) - // But samples records shouldn't get dropped + // The watcher went through 00000000 and is tailing the next one. 
retry(t, defaultRetryInterval, defaultRetries, func() bool { - return wt.checkNumSeries() > 0 + return wt.checkNumSeries() == seriesCount }) - require.Equal(t, segmentsToRead*seriesCount*samplesCount, wt.samplesAppended) - require.NoError(t, err) + // In the meantime, add some new segments in bulk. + // We should end up with segmentsToWrite + 1 segments now. + for i := 1; i < segmentsToWrite; i++ { + require.NoError(t, generateWALRecords(w, i, seriesCount, samplesCount)) + w.NextSegment() + } + + // Wait for the watcher. + require.NoError(t, g.Wait()) + + // All series and samples were read. + require.Equal(t, (segmentsToRead+1)*seriesCount, wt.checkNumSeries()) // Series from 00000000 are also read. + require.Equal(t, segmentsToRead*seriesCount*samplesCount, wt.samplesAppended) require.NoError(t, w.Close()) }) } diff --git a/tsdb/wlog/wlog.go b/tsdb/wlog/wlog.go index b14521f358f..54c257d61a4 100644 --- a/tsdb/wlog/wlog.go +++ b/tsdb/wlog/wlog.go @@ -21,6 +21,7 @@ import ( "fmt" "hash/crc32" "io" + "log/slog" "os" "path/filepath" "slices" @@ -28,11 +29,10 @@ import ( "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/golang/snappy" "github.com/klauspost/compress/zstd" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/tsdb/fileutil" ) @@ -121,7 +121,7 @@ func (e *CorruptionErr) Unwrap() error { } // OpenWriteSegment opens segment k in dir. The returned segment is ready for new appends. -func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) { +func OpenWriteSegment(logger *slog.Logger, dir string, k int) (*Segment, error) { segName := SegmentName(dir, k) f, err := os.OpenFile(segName, os.O_WRONLY|os.O_APPEND, 0o666) if err != nil { @@ -138,7 +138,7 @@ func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) { // If it was torn mid-record, a full read (which the caller should do anyway // to ensure integrity) will detect it as a corruption by the end. if d := stat.Size() % pageSize; d != 0 { - level.Warn(logger).Log("msg", "Last page of the wlog is torn, filling it with zeros", "segment", segName) + logger.Warn("Last page of the wlog is torn, filling it with zeros", "segment", segName) if _, err := f.Write(make([]byte, pageSize-d)); err != nil { f.Close() return nil, fmt.Errorf("zero-pad torn page: %w", err) @@ -201,7 +201,7 @@ func ParseCompressionType(compress bool, compressType string) CompressionType { // beyond the most recent segment. type WL struct { dir string - logger log.Logger + logger *slog.Logger segmentSize int mtx sync.RWMutex segment *Segment // Active segment. @@ -286,7 +286,7 @@ func newWLMetrics(w *WL, r prometheus.Registerer) *wlMetrics { }, func() float64 { val, err := w.Size() if err != nil { - level.Error(w.logger).Log("msg", "Failed to calculate size of \"wal\" dir", + w.logger.Error("Failed to calculate size of \"wal\" dir", "err", err.Error()) } return float64(val) @@ -309,13 +309,13 @@ func newWLMetrics(w *WL, r prometheus.Registerer) *wlMetrics { } // New returns a new WAL over the given directory. -func New(logger log.Logger, reg prometheus.Registerer, dir string, compress CompressionType) (*WL, error) { +func New(logger *slog.Logger, reg prometheus.Registerer, dir string, compress CompressionType) (*WL, error) { return NewSize(logger, reg, dir, DefaultSegmentSize, compress) } // NewSize returns a new write log over the given directory. // New segments are created with the specified size. 
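A quick sketch of the updated constructor in use (not from the patch; dir and the registry are assumed, and a nil logger falls back to promslog.NewNopLogger() as shown below):

	// Hypothetical caller of the slog-based API; dir is a placeholder.
	w, err := NewSize(promslog.NewNopLogger(), prometheus.NewRegistry(), dir, DefaultSegmentSize, CompressionSnappy)
	if err != nil {
		// handle the error
	}
	defer w.Close()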
-func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress CompressionType) (*WL, error) { +func NewSize(logger *slog.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress CompressionType) (*WL, error) { if segmentSize%pageSize != 0 { return nil, errors.New("invalid segment size") } @@ -323,7 +323,7 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi return nil, fmt.Errorf("create dir: %w", err) } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } var zstdWriter *zstd.Encoder @@ -378,9 +378,9 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi } // Open an existing WAL. -func Open(logger log.Logger, dir string) (*WL, error) { +func Open(logger *slog.Logger, dir string) (*WL, error) { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } zstdWriter, err := zstd.NewWriter(nil) if err != nil { @@ -443,7 +443,7 @@ func (w *WL) Repair(origErr error) error { if cerr.Segment < 0 { return errors.New("corruption error does not specify position") } - level.Warn(w.logger).Log("msg", "Starting corruption repair", + w.logger.Warn("Starting corruption repair", "segment", cerr.Segment, "offset", cerr.Offset) // All segments behind the corruption can no longer be used. @@ -451,7 +451,7 @@ func (w *WL) Repair(origErr error) error { if err != nil { return fmt.Errorf("list segments: %w", err) } - level.Warn(w.logger).Log("msg", "Deleting all segments newer than corrupted segment", "segment", cerr.Segment) + w.logger.Warn("Deleting all segments newer than corrupted segment", "segment", cerr.Segment) for _, s := range segs { if w.segment.i == s.index { @@ -473,7 +473,7 @@ func (w *WL) Repair(origErr error) error { // Regardless of the corruption offset, no record reaches into the previous segment. // So we can safely repair the WAL by removing the segment and re-inserting all // its records up to the corruption. - level.Warn(w.logger).Log("msg", "Rewrite corrupted segment", "segment", cerr.Segment) + w.logger.Warn("Rewrite corrupted segment", "segment", cerr.Segment) fn := SegmentName(w.Dir(), cerr.Segment) tmpfn := fn + ".repair" @@ -583,10 +583,10 @@ func (w *WL) nextSegment(async bool) (int, error) { // Don't block further writes by fsyncing the last segment. 
f := func() { if err := w.fsync(prev); err != nil { - level.Error(w.logger).Log("msg", "sync previous segment", "err", err) + w.logger.Error("sync previous segment", "err", err) } if err := prev.Close(); err != nil { - level.Error(w.logger).Log("msg", "close previous segment", "err", err) + w.logger.Error("close previous segment", "err", err) } } if async { @@ -890,10 +890,10 @@ func (w *WL) Close() (err error) { <-donec if err = w.fsync(w.segment); err != nil { - level.Error(w.logger).Log("msg", "sync previous segment", "err", err) + w.logger.Error("sync previous segment", "err", err) } if err := w.segment.Close(); err != nil { - level.Error(w.logger).Log("msg", "close previous segment", "err", err) + w.logger.Error("close previous segment", "err", err) } w.metrics.Unregister() diff --git a/tsdb/wlog/wlog_test.go b/tsdb/wlog/wlog_test.go index 165d2758f04..d195aaee2fc 100644 --- a/tsdb/wlog/wlog_test.go +++ b/tsdb/wlog/wlog_test.go @@ -23,14 +23,13 @@ import ( "path/filepath" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" client_testutil "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/goleak" "github.com/prometheus/prometheus/tsdb/fileutil" - "github.com/prometheus/prometheus/util/testutil" ) func TestMain(m *testing.M) { @@ -215,7 +214,7 @@ func TestCorruptAndCarryOn(t *testing.T) { dir := t.TempDir() var ( - logger = testutil.NewLogger(t) + logger = promslog.NewNopLogger() segmentSize = pageSize * 3 recordSize = (pageSize / 3) - recordHeaderSize ) @@ -568,7 +567,7 @@ func TestUnregisterMetrics(t *testing.T) { reg := prometheus.NewRegistry() for i := 0; i < 2; i++ { - wl, err := New(log.NewNopLogger(), reg, t.TempDir(), CompressionNone) + wl, err := New(promslog.NewNopLogger(), reg, t.TempDir(), CompressionNone) require.NoError(t, err) require.NoError(t, wl.Close()) } diff --git a/ui-commits b/ui-commits new file mode 100644 index 00000000000..7f34e1f95ac --- /dev/null +++ b/ui-commits @@ -0,0 +1,12 @@ +dfec29d8e Fix border color for target pools with one target that is failing +65743bf9b ui: drop template readme +a7c1a951d Add general Mantine overrides CSS file +0757fbbec Make sure that alert element table headers are not wrapped +0180cf31a Factor out common icon and card styles +50af7d589 Fix tree line drawing by using a callback ref +ac01dc903 Explain, vector-to-vector: Do not compute results for set operators +9b0dc68d0 PromQL explain view: Support set operators +57898c792 Refactor and fix time formatting functions, add tests +091fc403c Fiddle with targets table styles to try and improve things a bit +a1908df92 Don't wrap action buttons below metric name in metrics explorer +ac5377873 mantine UI: Distinguish between Not Ready and Stopping diff --git a/util/annotations/annotations.go b/util/annotations/annotations.go index b0272b7fee0..ebe74ecd116 100644 --- a/util/annotations/annotations.go +++ b/util/annotations/annotations.go @@ -146,6 +146,7 @@ var ( PossibleNonCounterInfo = fmt.Errorf("%w: metric might not be a counter, name does not end in _total/_sum/_count/_bucket:", PromQLInfo) HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name", PromQLInfo) + IncompatibleTypesInBinOpInfo = fmt.Errorf("%w: incompatible sample types encountered for binary operator", PromQLInfo) ) 
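To illustrate the new info annotation, a hypothetical call; its constructor NewIncompatibleTypesInBinOpInfo is added further down in this patch, and the operands and position here are invented:

	// A float on the left and a histogram on the right of '+'.
	err := NewIncompatibleTypesInBinOpInfo("float", "+", "histogram", posrange.PositionRange{Start: 1, End: 10})
	// err.Error() then reads roughly, with PromQLInfo supplying the prefix:
	//   ... incompatible sample types encountered for binary operator "+": float + histogram
	_ = err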
type annoErr struct { @@ -273,3 +274,12 @@ func NewHistogramQuantileForcedMonotonicityInfo(metricName string, pos posrange. Err: fmt.Errorf("%w %q", HistogramQuantileForcedMonotonicityInfo, metricName), } } + +// NewIncompatibleTypesInBinOpInfo is used if binary operators act on a +// combination of types that doesn't work and therefore returns no result. +func NewIncompatibleTypesInBinOpInfo(lhsType, operator, rhsType string, pos posrange.PositionRange) error { + return annoErr{ + PositionRange: pos, + Err: fmt.Errorf("%w %q: %s %s %s", IncompatibleTypesInBinOpInfo, operator, lhsType, operator, rhsType), + } +} diff --git a/util/convertnhcb/convertnhcb.go b/util/convertnhcb/convertnhcb.go new file mode 100644 index 00000000000..5e08422aa06 --- /dev/null +++ b/util/convertnhcb/convertnhcb.go @@ -0,0 +1,173 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package convertnhcb + +import ( + "fmt" + "math" + "sort" + "strings" + + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" +) + +// TempHistogram is used to collect information about classic histogram +// samples incrementally before creating a histogram.Histogram or +// histogram.FloatHistogram based on the values collected. +type TempHistogram struct { + BucketCounts map[float64]float64 + Count float64 + Sum float64 + HasFloat bool +} + +// NewTempHistogram creates a new TempHistogram to +// collect information about classic histogram samples. +func NewTempHistogram() TempHistogram { + return TempHistogram{ + BucketCounts: map[float64]float64{}, + } +} + +func (h TempHistogram) getIntBucketCounts() (map[float64]int64, error) { + bucketCounts := map[float64]int64{} + for le, count := range h.BucketCounts { + intCount := int64(math.Round(count)) + if float64(intCount) != count { + return nil, fmt.Errorf("bucket count %f for le %g is not an integer", count, le) + } + bucketCounts[le] = intCount + } + return bucketCounts, nil +} + +// ProcessUpperBoundsAndCreateBaseHistogram prepares an integer native +// histogram with custom buckets based on the provided upper bounds. +// Everything is set except the bucket counts. +// The sorted upper bounds are also returned. 
+func ProcessUpperBoundsAndCreateBaseHistogram(upperBounds0 []float64, needsDedup bool) ([]float64, *histogram.Histogram) { + sort.Float64s(upperBounds0) + var upperBounds []float64 + if needsDedup { + upperBounds = make([]float64, 0, len(upperBounds0)) + prevLE := math.Inf(-1) + for _, le := range upperBounds0 { + if le != prevLE { + upperBounds = append(upperBounds, le) + prevLE = le + } + } + } else { + upperBounds = upperBounds0 + } + var customBounds []float64 + if upperBounds[len(upperBounds)-1] == math.Inf(1) { + customBounds = upperBounds[:len(upperBounds)-1] + } else { + customBounds = upperBounds + } + return upperBounds, &histogram.Histogram{ + Count: 0, + Sum: 0, + Schema: histogram.CustomBucketsSchema, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: uint32(len(upperBounds))}, + }, + PositiveBuckets: make([]int64, len(upperBounds)), + CustomValues: customBounds, + } +} + +// NewHistogram fills the bucket counts in the provided histogram.Histogram +// or histogram.FloatHistogram based on the provided temporary histogram and +// upper bounds. +func NewHistogram(histogram TempHistogram, upperBounds []float64, hBase *histogram.Histogram, fhBase *histogram.FloatHistogram) (*histogram.Histogram, *histogram.FloatHistogram) { + intBucketCounts, err := histogram.getIntBucketCounts() + if err != nil { + return nil, newFloatHistogram(histogram, upperBounds, histogram.BucketCounts, fhBase) + } + return newIntegerHistogram(histogram, upperBounds, intBucketCounts, hBase), nil +} + +func newIntegerHistogram(histogram TempHistogram, upperBounds []float64, bucketCounts map[float64]int64, hBase *histogram.Histogram) *histogram.Histogram { + h := hBase.Copy() + absBucketCounts := make([]int64, len(h.PositiveBuckets)) + var prevCount, total int64 + for i, le := range upperBounds { + currCount, exists := bucketCounts[le] + if !exists { + currCount = 0 + } + count := currCount - prevCount + absBucketCounts[i] = count + total += count + prevCount = currCount + } + h.PositiveBuckets[0] = absBucketCounts[0] + for i := 1; i < len(h.PositiveBuckets); i++ { + h.PositiveBuckets[i] = absBucketCounts[i] - absBucketCounts[i-1] + } + h.Sum = histogram.Sum + if histogram.Count != 0 { + total = int64(histogram.Count) + } + h.Count = uint64(total) + return h.Compact(0) +} + +func newFloatHistogram(histogram TempHistogram, upperBounds []float64, bucketCounts map[float64]float64, fhBase *histogram.FloatHistogram) *histogram.FloatHistogram { + fh := fhBase.Copy() + var prevCount, total float64 + for i, le := range upperBounds { + currCount, exists := bucketCounts[le] + if !exists { + currCount = 0 + } + count := currCount - prevCount + fh.PositiveBuckets[i] = count + total += count + prevCount = currCount + } + fh.Sum = histogram.Sum + if histogram.Count != 0 { + total = histogram.Count + } + fh.Count = total + return fh.Compact(0) +} + +func GetHistogramMetricBase(m labels.Labels, suffix string) labels.Labels { + mName := m.Get(labels.MetricName) + return labels.NewBuilder(m). + Set(labels.MetricName, strings.TrimSuffix(mName, suffix)). + Del(labels.BucketLabel). + Labels() +} + +// GetHistogramMetricBaseName removes the suffixes _bucket, _sum, _count from +// the metric name. We specifically do not remove the _created suffix as that +// should be removed by the caller. 
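As a rough usage sketch of the helpers above (not part of the patch; the bucket values are invented, error handling is omitted, and the ToFloat call is assumed from the histogram package):

	// Collect classic histogram buckets, then convert them to an NHCB.
	th := NewTempHistogram()
	th.BucketCounts[0.1] = 2
	th.BucketCounts[0.5] = 5
	th.BucketCounts[math.Inf(1)] = 7 // the +Inf bucket carries the total count
	th.Count = 7
	th.Sum = 1.234

	upperBounds, hBase := ProcessUpperBoundsAndCreateBaseHistogram([]float64{0.5, 0.1, math.Inf(1)}, true)
	fhBase := hBase.ToFloat(nil)
	h, fh := NewHistogram(th, upperBounds, hBase, fhBase)
	// h is returned when all counts are integral; otherwise fh carries the float version.
	_, _ = h, fh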
+func GetHistogramMetricBaseName(s string) string { + if r, ok := strings.CutSuffix(s, "_bucket"); ok { + return r + } + if r, ok := strings.CutSuffix(s, "_sum"); ok { + return r + } + if r, ok := strings.CutSuffix(s, "_count"); ok { + return r + } + return s +} diff --git a/util/fmtutil/format.go b/util/fmtutil/format.go index 9034a90fa70..a10908bb8c0 100644 --- a/util/fmtutil/format.go +++ b/util/fmtutil/format.go @@ -113,7 +113,7 @@ func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Me toTimeseries(wr, labels, timestamp, m.GetCounter().GetValue()) case m.Summary != nil: metricName := labels[model.MetricNameLabel] - // Preserve metric name order with first quantile labels timeseries then sum suffix timeserie and finally count suffix timeserie + // Preserve metric name order with first quantile labels timeseries then sum suffix timeseries and finally count suffix timeseries // Add Summary quantile timeseries quantileLabels := make(map[string]string, len(labels)+1) for key, value := range labels { @@ -125,16 +125,16 @@ func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Me toTimeseries(wr, quantileLabels, timestamp, q.GetValue()) } // Overwrite label model.MetricNameLabel for count and sum metrics - // Add Summary sum timeserie + // Add Summary sum timeseries labels[model.MetricNameLabel] = metricName + sumStr toTimeseries(wr, labels, timestamp, m.GetSummary().GetSampleSum()) - // Add Summary count timeserie + // Add Summary count timeseries labels[model.MetricNameLabel] = metricName + countStr toTimeseries(wr, labels, timestamp, float64(m.GetSummary().GetSampleCount())) case m.Histogram != nil: metricName := labels[model.MetricNameLabel] - // Preserve metric name order with first bucket suffix timeseries then sum suffix timeserie and finally count suffix timeserie + // Preserve metric name order with first bucket suffix timeseries then sum suffix timeseries and finally count suffix timeseries // Add Histogram bucket timeseries bucketLabels := make(map[string]string, len(labels)+1) for key, value := range labels { @@ -146,10 +146,10 @@ func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Me toTimeseries(wr, bucketLabels, timestamp, float64(b.GetCumulativeCount())) } // Overwrite label model.MetricNameLabel for count and sum metrics - // Add Histogram sum timeserie + // Add Histogram sum timeseries labels[model.MetricNameLabel] = metricName + sumStr toTimeseries(wr, labels, timestamp, m.GetHistogram().GetSampleSum()) - // Add Histogram count timeserie + // Add Histogram count timeseries labels[model.MetricNameLabel] = metricName + countStr toTimeseries(wr, labels, timestamp, float64(m.GetHistogram().GetSampleCount())) diff --git a/util/logging/dedupe.go b/util/logging/dedupe.go index d490a6afdf1..d5aee5c095c 100644 --- a/util/logging/dedupe.go +++ b/util/logging/dedupe.go @@ -14,12 +14,10 @@ package logging import ( - "bytes" + "context" + "log/slog" "sync" "time" - - "github.com/go-kit/log" - "github.com/go-logfmt/logfmt" ) const ( @@ -28,22 +26,9 @@ const ( maxEntries = 1024 ) -type logfmtEncoder struct { - *logfmt.Encoder - buf bytes.Buffer -} - -var logfmtEncoderPool = sync.Pool{ - New: func() interface{} { - var enc logfmtEncoder - enc.Encoder = logfmt.NewEncoder(&enc.buf) - return &enc - }, -} - -// Deduper implement log.Logger, dedupes log lines. +// Deduper implements *slog.Handler, dedupes log lines based on a time duration. 
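A minimal sketch of how the reworked Deduper is meant to be used (it mirrors the updated test further below): it is installed as the handler of a *slog.Logger instead of being called directly:

	d := Dedupe(promslog.New(&promslog.Config{}), time.Minute)
	defer d.Stop()
	logger := slog.New(d)        // Deduper is an slog.Handler
	logger.Info("noisy message") // repeats within the minute are dropped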
type Deduper struct { - next log.Logger + next *slog.Logger repeat time.Duration quit chan struct{} mtx sync.RWMutex @@ -51,7 +36,7 @@ type Deduper struct { } // Dedupe log lines to next, only repeating every repeat duration. -func Dedupe(next log.Logger, repeat time.Duration) *Deduper { +func Dedupe(next *slog.Logger, repeat time.Duration) *Deduper { d := &Deduper{ next: next, repeat: repeat, @@ -62,6 +47,63 @@ func Dedupe(next log.Logger, repeat time.Duration) *Deduper { return d } +// Enabled returns true if the Deduper's internal slog.Logger is enabled at the +// provided context and log level, and returns false otherwise. It implements +// slog.Handler. +func (d *Deduper) Enabled(ctx context.Context, level slog.Level) bool { + return d.next.Enabled(ctx, level) +} + +// Handle uses the provided context and slog.Record to deduplicate messages +// every 1m. Log records received within the interval are not acted on, and +// thus dropped. Log records that pass deduplication and need action invoke the +// Handle() method on the Deduper's internal slog.Logger's handler, effectively +// chaining log calls to the internal slog.Logger. +func (d *Deduper) Handle(ctx context.Context, r slog.Record) error { + line := r.Message + d.mtx.RLock() + last, ok := d.seen[line] + d.mtx.RUnlock() + + if ok && time.Since(last) < d.repeat { + return nil + } + + d.mtx.Lock() + if len(d.seen) < maxEntries { + d.seen[line] = time.Now() + } + d.mtx.Unlock() + + return d.next.Handler().Handle(ctx, r.Clone()) +} + +// WithAttrs adds the provided attributes to the Deduper's internal +// slog.Logger. It implements slog.Handler. +func (d *Deduper) WithAttrs(attrs []slog.Attr) slog.Handler { + return &Deduper{ + next: slog.New(d.next.Handler().WithAttrs(attrs)), + repeat: d.repeat, + quit: d.quit, + seen: d.seen, + } +} + +// WithGroup adds the provided group name to the Deduper's internal +// slog.Logger. It implements slog.Handler. +func (d *Deduper) WithGroup(name string) slog.Handler { + if name == "" { + return d + } + + return &Deduper{ + next: slog.New(d.next.Handler().WithGroup(name)), + repeat: d.repeat, + quit: d.quit, + seen: d.seen, + } +} + // Stop the Deduper. func (d *Deduper) Stop() { close(d.quit) @@ -87,44 +129,3 @@ func (d *Deduper) run() { } } } - -// Log implements log.Logger. -func (d *Deduper) Log(keyvals ...interface{}) error { - line, err := encode(keyvals...) - if err != nil { - return err - } - - d.mtx.RLock() - last, ok := d.seen[line] - d.mtx.RUnlock() - - if ok && time.Since(last) < d.repeat { - return nil - } - - d.mtx.Lock() - if len(d.seen) < maxEntries { - d.seen[line] = time.Now() - } - d.mtx.Unlock() - - return d.next.Log(keyvals...) 
-} - -func encode(keyvals ...interface{}) (string, error) { - enc := logfmtEncoderPool.Get().(*logfmtEncoder) - enc.buf.Reset() - defer logfmtEncoderPool.Put(enc) - - if err := enc.EncodeKeyvals(keyvals...); err != nil { - return "", err - } - - // Add newline to the end of the buffer - if err := enc.EndRecord(); err != nil { - return "", err - } - - return enc.buf.String(), nil -} diff --git a/util/logging/dedupe_test.go b/util/logging/dedupe_test.go index e05d6454c50..5baa90b0386 100644 --- a/util/logging/dedupe_test.go +++ b/util/logging/dedupe_test.go @@ -14,34 +14,45 @@ package logging import ( + "bytes" + "log/slog" + "strings" "testing" "time" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" ) -type counter int - -func (c *counter) Log(...interface{}) error { - (*c)++ - return nil -} - func TestDedupe(t *testing.T) { - var c counter - d := Dedupe(&c, 100*time.Millisecond) + var buf bytes.Buffer + d := Dedupe(promslog.New(&promslog.Config{Writer: &buf}), 100*time.Millisecond) + dlog := slog.New(d) defer d.Stop() // Log 10 times quickly, ensure they are deduped. for i := 0; i < 10; i++ { - err := d.Log("msg", "hello") - require.NoError(t, err) + dlog.Info("test", "hello", "world") } - require.Equal(t, 1, int(c)) + + // Trim empty lines + lines := []string{} + for _, line := range strings.Split(buf.String(), "\n") { + if line != "" { + lines = append(lines, line) + } + } + require.Len(t, lines, 1) // Wait, then log again, make sure it is logged. time.Sleep(200 * time.Millisecond) - err := d.Log("msg", "hello") - require.NoError(t, err) - require.Equal(t, 2, int(c)) + dlog.Info("test", "hello", "world") + // Trim empty lines + lines = []string{} + for _, line := range strings.Split(buf.String(), "\n") { + if line != "" { + lines = append(lines, line) + } + } + require.Len(t, lines, 2) } diff --git a/util/logging/file.go b/util/logging/file.go index 2afa828547f..9db7fb722be 100644 --- a/util/logging/file.go +++ b/util/logging/file.go @@ -14,21 +14,17 @@ package logging import ( + "context" "fmt" + "log/slog" "os" - "time" - "github.com/go-kit/log" + "github.com/prometheus/common/promslog" ) -var timestampFormat = log.TimestampFormat( - func() time.Time { return time.Now().UTC() }, - "2006-01-02T15:04:05.000Z07:00", -) - -// JSONFileLogger represents a logger that writes JSON to a file. +// JSONFileLogger represents a logger that writes JSON to a file. It implements the promql.QueryLogger interface. type JSONFileLogger struct { - logger log.Logger + logger *slog.Logger file *os.File } @@ -40,21 +36,30 @@ func NewJSONFileLogger(s string) (*JSONFileLogger, error) { f, err := os.OpenFile(s, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o666) if err != nil { - return nil, fmt.Errorf("can't create json logger: %w", err) + return nil, fmt.Errorf("can't create json log file: %w", err) } + jsonFmt := &promslog.AllowedFormat{} + _ = jsonFmt.Set("json") return &JSONFileLogger{ - logger: log.With(log.NewJSONLogger(f), "ts", timestampFormat), + logger: promslog.New(&promslog.Config{Format: jsonFmt, Writer: f}), file: f, }, nil } -// Close closes the underlying file. +// Close closes the underlying file. It implements the promql.QueryLogger interface. func (l *JSONFileLogger) Close() error { return l.file.Close() } -// Log calls the Log function of the underlying logger. -func (l *JSONFileLogger) Log(i ...interface{}) error { - return l.logger.Log(i...) +// With calls the `With()` method on the underlying `log/slog.Logger` with the +// provided msg and args. 
It implements the promql.QueryLogger interface. +func (l *JSONFileLogger) With(args ...any) { + l.logger = l.logger.With(args...) +} + +// Log calls the `Log()` method on the underlying `log/slog.Logger` with the +// provided msg and args. It implements the promql.QueryLogger interface. +func (l *JSONFileLogger) Log(ctx context.Context, level slog.Level, msg string, args ...any) { + l.logger.Log(ctx, level, msg, args...) } diff --git a/util/logging/file_test.go b/util/logging/file_test.go index 0e760a4848a..c9f7240feeb 100644 --- a/util/logging/file_test.go +++ b/util/logging/file_test.go @@ -14,6 +14,8 @@ package logging import ( + "context" + "log/slog" "os" "strings" "testing" @@ -34,12 +36,13 @@ func TestJSONFileLogger_basic(t *testing.T) { require.NoError(t, err) require.NotNil(t, l, "logger can't be nil") - err = l.Log("test", "yes") + l.Log(context.Background(), slog.LevelInfo, "test", "hello", "world") require.NoError(t, err) r := make([]byte, 1024) _, err = f.Read(r) require.NoError(t, err) - result, err := regexp.Match(`^{"test":"yes","ts":"[^"]+"}\n`, r) + + result, err := regexp.Match(`^{"time":"[^"]+","level":"INFO","source":"file.go:\d+","msg":"test","hello":"world"}\n`, r) require.NoError(t, err) require.True(t, result, "unexpected content: %s", r) @@ -63,14 +66,14 @@ func TestJSONFileLogger_parallel(t *testing.T) { require.NoError(t, err) require.NotNil(t, l, "logger can't be nil") - err = l.Log("test", "yes") + l.Log(context.Background(), slog.LevelInfo, "test", "hello", "world") require.NoError(t, err) l2, err := NewJSONFileLogger(f.Name()) require.NoError(t, err) require.NotNil(t, l, "logger can't be nil") - err = l2.Log("test", "yes") + l2.Log(context.Background(), slog.LevelInfo, "test", "hello", "world") require.NoError(t, err) err = l.Close() diff --git a/util/logging/ratelimit.go b/util/logging/ratelimit.go deleted file mode 100644 index 32d1e249e68..00000000000 --- a/util/logging/ratelimit.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logging - -import ( - "github.com/go-kit/log" - "golang.org/x/time/rate" -) - -type ratelimiter struct { - limiter *rate.Limiter - next log.Logger -} - -// RateLimit write to a logger. -func RateLimit(next log.Logger, limit rate.Limit) log.Logger { - return &ratelimiter{ - limiter: rate.NewLimiter(limit, int(limit)), - next: next, - } -} - -func (r *ratelimiter) Log(keyvals ...interface{}) error { - if r.limiter.Allow() { - return r.next.Log(keyvals...) - } - return nil -} diff --git a/util/notifications/notifications.go b/util/notifications/notifications.go new file mode 100644 index 00000000000..4888a0b6641 --- /dev/null +++ b/util/notifications/notifications.go @@ -0,0 +1,185 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package notifications + +import ( + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + ConfigurationUnsuccessful = "Configuration reload has failed." + StartingUp = "Prometheus is starting and replaying the write-ahead log (WAL)." + ShuttingDown = "Prometheus is shutting down and gracefully stopping all operations." +) + +// Notification represents an individual notification message. +type Notification struct { + Text string `json:"text"` + Date time.Time `json:"date"` + Active bool `json:"active"` +} + +// Notifications stores a list of Notification objects. +// It also manages live subscribers that receive notifications via channels. +type Notifications struct { + mu sync.Mutex + notifications []Notification + subscribers map[chan Notification]struct{} // Active subscribers. + maxSubscribers int + + subscriberGauge prometheus.Gauge + notificationsSent prometheus.Counter + notificationsDropped prometheus.Counter +} + +// NewNotifications creates a new Notifications instance. +func NewNotifications(maxSubscribers int, reg prometheus.Registerer) *Notifications { + n := &Notifications{ + subscribers: make(map[chan Notification]struct{}), + maxSubscribers: maxSubscribers, + subscriberGauge: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "prometheus", + Subsystem: "api", + Name: "notification_active_subscribers", + Help: "The current number of active notification subscribers.", + }), + notificationsSent: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "prometheus", + Subsystem: "api", + Name: "notification_updates_sent_total", + Help: "Total number of notification updates sent.", + }), + notificationsDropped: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "prometheus", + Subsystem: "api", + Name: "notification_updates_dropped_total", + Help: "Total number of notification updates dropped.", + }), + } + + if reg != nil { + reg.MustRegister(n.subscriberGauge, n.notificationsSent, n.notificationsDropped) + } + + return n +} + +// AddNotification adds a new notification or updates the timestamp if it already exists. +func (n *Notifications) AddNotification(text string) { + n.mu.Lock() + defer n.mu.Unlock() + + for i, notification := range n.notifications { + if notification.Text == text { + n.notifications[i].Date = time.Now() + + n.notifySubscribers(n.notifications[i]) + return + } + } + + newNotification := Notification{ + Text: text, + Date: time.Now(), + Active: true, + } + n.notifications = append(n.notifications, newNotification) + + n.notifySubscribers(newNotification) +} + +// notifySubscribers sends a notification to all active subscribers. +func (n *Notifications) notifySubscribers(notification Notification) { + for sub := range n.subscribers { + // Non-blocking send to avoid subscriber blocking issues. + n.notificationsSent.Inc() + select { + case sub <- notification: + // Notification sent to the subscriber. + default: + // Drop the notification if the subscriber's channel is full. 
+ n.notificationsDropped.Inc() + } + } +} + +// DeleteNotification removes the first notification that matches the provided text. +// The deleted notification is sent to subscribers with Active: false before being removed. +func (n *Notifications) DeleteNotification(text string) { + n.mu.Lock() + defer n.mu.Unlock() + + // Iterate through the notifications to find the matching text. + for i, notification := range n.notifications { + if notification.Text == text { + // Mark the notification as inactive and notify subscribers. + notification.Active = false + n.notifySubscribers(notification) + + // Remove the notification from the list. + n.notifications = append(n.notifications[:i], n.notifications[i+1:]...) + return + } + } +} + +// Get returns a copy of the list of notifications for safe access outside the struct. +func (n *Notifications) Get() []Notification { + n.mu.Lock() + defer n.mu.Unlock() + + // Return a copy of the notifications slice to avoid modifying the original slice outside. + notificationsCopy := make([]Notification, len(n.notifications)) + copy(notificationsCopy, n.notifications) + return notificationsCopy +} + +// Sub allows a client to subscribe to live notifications. +// It returns a channel where the subscriber will receive notifications and a function to unsubscribe. +// Each subscriber has its own goroutine to handle notifications and prevent blocking. +func (n *Notifications) Sub() (<-chan Notification, func(), bool) { + n.mu.Lock() + defer n.mu.Unlock() + + if len(n.subscribers) >= n.maxSubscribers { + return nil, nil, false + } + + ch := make(chan Notification, 10) // Buffered channel to prevent blocking. + + // Add the new subscriber to the list. + n.subscribers[ch] = struct{}{} + n.subscriberGauge.Set(float64(len(n.subscribers))) + + // Send all current notifications to the new subscriber. + for _, notification := range n.notifications { + ch <- notification + } + + // Unsubscribe function to remove the channel from subscribers. + unsubscribe := func() { + n.mu.Lock() + defer n.mu.Unlock() + + // Close the channel and remove it from the subscribers map. + close(ch) + delete(n.subscribers, ch) + n.subscriberGauge.Set(float64(len(n.subscribers))) + } + + return ch, unsubscribe, true +} diff --git a/util/notifications/notifications_test.go b/util/notifications/notifications_test.go new file mode 100644 index 00000000000..e487e9ce548 --- /dev/null +++ b/util/notifications/notifications_test.go @@ -0,0 +1,223 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package notifications + +import ( + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +// TestNotificationLifecycle tests adding, modifying, and deleting notifications. +func TestNotificationLifecycle(t *testing.T) { + notifs := NewNotifications(10, nil) + + // Add a notification. + notifs.AddNotification("Test Notification 1") + + // Check if the notification was added. 
+ notifications := notifs.Get() + require.Len(t, notifications, 1, "Expected 1 notification after addition.") + require.Equal(t, "Test Notification 1", notifications[0].Text, "Notification text mismatch.") + require.True(t, notifications[0].Active, "Expected notification to be active.") + + // Modify the notification. + notifs.AddNotification("Test Notification 1") + notifications = notifs.Get() + require.Len(t, notifications, 1, "Expected 1 notification after modification.") + + // Delete the notification. + notifs.DeleteNotification("Test Notification 1") + notifications = notifs.Get() + require.Empty(t, notifications, "Expected no notifications after deletion.") +} + +// TestSubscriberReceivesNotifications tests that a subscriber receives notifications, including modifications and deletions. +func TestSubscriberReceivesNotifications(t *testing.T) { + notifs := NewNotifications(10, nil) + + // Subscribe to notifications. + sub, unsubscribe, ok := notifs.Sub() + require.True(t, ok) + + var wg sync.WaitGroup + wg.Add(1) + + receivedNotifications := make([]Notification, 0) + + // Goroutine to listen for notifications. + go func() { + defer wg.Done() + for notification := range sub { + receivedNotifications = append(receivedNotifications, notification) + } + }() + + // Add notifications. + notifs.AddNotification("Test Notification 1") + notifs.AddNotification("Test Notification 2") + + // Modify a notification. + notifs.AddNotification("Test Notification 1") + + // Delete a notification. + notifs.DeleteNotification("Test Notification 2") + + // Wait for notifications to propagate. + time.Sleep(100 * time.Millisecond) + + unsubscribe() + wg.Wait() // Wait for the subscriber goroutine to finish. + + // Verify that we received the expected number of notifications. + require.Len(t, receivedNotifications, 4, "Expected 4 notifications (2 active, 1 modified, 1 deleted).") + + // Check the content and state of received notifications. + expected := []struct { + Text string + Active bool + }{ + {"Test Notification 1", true}, + {"Test Notification 2", true}, + {"Test Notification 1", true}, + {"Test Notification 2", false}, + } + + for i, n := range receivedNotifications { + require.Equal(t, expected[i].Text, n.Text, "Notification text mismatch at index %d.", i) + require.Equal(t, expected[i].Active, n.Active, "Notification active state mismatch at index %d.", i) + } +} + +// TestMultipleSubscribers tests that multiple subscribers receive notifications independently. +func TestMultipleSubscribers(t *testing.T) { + notifs := NewNotifications(10, nil) + + // Subscribe two subscribers to notifications. + sub1, unsubscribe1, ok1 := notifs.Sub() + require.True(t, ok1) + + sub2, unsubscribe2, ok2 := notifs.Sub() + require.True(t, ok2) + + var wg sync.WaitGroup + wg.Add(2) + + receivedSub1 := make([]Notification, 0) + receivedSub2 := make([]Notification, 0) + + // Goroutine for subscriber 1. + go func() { + defer wg.Done() + for notification := range sub1 { + receivedSub1 = append(receivedSub1, notification) + } + }() + + // Goroutine for subscriber 2. + go func() { + defer wg.Done() + for notification := range sub2 { + receivedSub2 = append(receivedSub2, notification) + } + }() + + // Add and delete notifications. + notifs.AddNotification("Test Notification 1") + notifs.DeleteNotification("Test Notification 1") + + // Wait for notifications to propagate. + time.Sleep(100 * time.Millisecond) + + // Unsubscribe both. 
+ unsubscribe1() + unsubscribe2() + + wg.Wait() + + // Both subscribers should have received the same 2 notifications. + require.Len(t, receivedSub1, 2, "Expected 2 notifications for subscriber 1.") + require.Len(t, receivedSub2, 2, "Expected 2 notifications for subscriber 2.") + + // Verify that both subscribers received the same notifications. + for i := 0; i < 2; i++ { + require.Equal(t, receivedSub1[i], receivedSub2[i], "Subscriber notification mismatch at index %d.", i) + } +} + +// TestUnsubscribe tests that unsubscribing prevents further notifications from being received. +func TestUnsubscribe(t *testing.T) { + notifs := NewNotifications(10, nil) + + // Subscribe to notifications. + sub, unsubscribe, ok := notifs.Sub() + require.True(t, ok) + + var wg sync.WaitGroup + wg.Add(1) + + receivedNotifications := make([]Notification, 0) + + // Goroutine to listen for notifications. + go func() { + defer wg.Done() + for notification := range sub { + receivedNotifications = append(receivedNotifications, notification) + } + }() + + // Add a notification and then unsubscribe. + notifs.AddNotification("Test Notification 1") + time.Sleep(100 * time.Millisecond) // Allow time for notification delivery. + unsubscribe() // Unsubscribe. + + // Add another notification after unsubscribing. + notifs.AddNotification("Test Notification 2") + + // Wait for the subscriber goroutine to finish. + wg.Wait() + + // Only the first notification should have been received. + require.Len(t, receivedNotifications, 1, "Expected 1 notification before unsubscribe.") + require.Equal(t, "Test Notification 1", receivedNotifications[0].Text, "Unexpected notification text.") +} + +// TestMaxSubscribers tests that exceeding the max subscribers limit prevents additional subscriptions. +func TestMaxSubscribers(t *testing.T) { + maxSubscribers := 2 + notifs := NewNotifications(maxSubscribers, nil) + + // Subscribe the maximum number of subscribers. + _, unsubscribe1, ok1 := notifs.Sub() + require.True(t, ok1, "Expected first subscription to succeed.") + + _, unsubscribe2, ok2 := notifs.Sub() + require.True(t, ok2, "Expected second subscription to succeed.") + + // Try to subscribe more than the max allowed. + _, _, ok3 := notifs.Sub() + require.False(t, ok3, "Expected third subscription to fail due to max subscriber limit.") + + // Unsubscribe one subscriber and try again. + unsubscribe1() + + _, unsubscribe4, ok4 := notifs.Sub() + require.True(t, ok4, "Expected subscription to succeed after unsubscribing a subscriber.") + + // Clean up the subscriptions. + unsubscribe2() + unsubscribe4() +} diff --git a/util/runtime/limits_default.go b/util/runtime/limits_default.go index 0126adb1a84..156747d450b 100644 --- a/util/runtime/limits_default.go +++ b/util/runtime/limits_default.go @@ -23,7 +23,7 @@ import ( // syscall.RLIM_INFINITY is a constant. // Its type is int on most architectures but there are exceptions such as loong64. -// Uniform it to uint accorind to the standard. +// Uniform it to uint according to the standard. // https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/sys_resource.h.html var unlimited uint64 = syscall.RLIM_INFINITY & math.MaxUint64 diff --git a/util/teststorage/storage.go b/util/teststorage/storage.go index 7d1f9dda242..e15d591e0c7 100644 --- a/util/teststorage/storage.go +++ b/util/teststorage/storage.go @@ -30,15 +30,15 @@ import ( // New returns a new TestStorage for testing purposes // that removes all associated files on closing. 
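A hypothetical test using the new optional out-of-order window argument (added just below); the ten-minute value is invented and is passed in milliseconds:

	// Test storage that accepts samples up to 10 minutes out of order.
	s := teststorage.New(t, (10 * time.Minute).Milliseconds())
	t.Cleanup(func() { _ = s.Close() })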
-func New(t testutil.T) *TestStorage { - stor, err := NewWithError() +func New(t testutil.T, outOfOrderTimeWindow ...int64) *TestStorage { + stor, err := NewWithError(outOfOrderTimeWindow...) require.NoError(t, err) return stor } // NewWithError returns a new TestStorage for user facing tests, which reports // errors directly. -func NewWithError() (*TestStorage, error) { +func NewWithError(outOfOrderTimeWindow ...int64) (*TestStorage, error) { dir, err := os.MkdirTemp("", "test_storage") if err != nil { return nil, fmt.Errorf("opening test directory: %w", err) @@ -51,6 +51,14 @@ func NewWithError() (*TestStorage, error) { opts.MaxBlockDuration = int64(24 * time.Hour / time.Millisecond) opts.RetentionDuration = 0 opts.EnableNativeHistograms = true + + // Set OutOfOrderTimeWindow if provided, otherwise use default (0) + if len(outOfOrderTimeWindow) > 0 { + opts.OutOfOrderTimeWindow = outOfOrderTimeWindow[0] + } else { + opts.OutOfOrderTimeWindow = 0 // Default value is zero + } + db, err := tsdb.Open(dir, nil, nil, opts, tsdb.NewDBStats()) if err != nil { return nil, fmt.Errorf("opening test storage: %w", err) diff --git a/util/testutil/port.go b/util/testutil/port.go index 1e449b123d3..7cf4cf1ccc9 100644 --- a/util/testutil/port.go +++ b/util/testutil/port.go @@ -15,21 +15,56 @@ package testutil import ( "net" + "sync" "testing" ) +var ( + mu sync.Mutex + usedPorts []int +) + // RandomUnprivilegedPort returns valid unprivileged random port number which can be used for testing. func RandomUnprivilegedPort(t *testing.T) int { t.Helper() + mu.Lock() + defer mu.Unlock() + + port, err := getPort() + if err != nil { + t.Fatal(err) + } + + for portWasUsed(port) { + port, err = getPort() + if err != nil { + t.Fatal(err) + } + } + + usedPorts = append(usedPorts, port) + + return port +} + +func portWasUsed(port int) bool { + for _, usedPort := range usedPorts { + if port == usedPort { + return true + } + } + return false +} +func getPort() (int, error) { listener, err := net.Listen("tcp", ":0") if err != nil { - t.Fatalf("Listening on random port: %v", err) + return 0, err } if err := listener.Close(); err != nil { - t.Fatalf("Closing listener: %v", err) + return 0, err } - return listener.Addr().(*net.TCPAddr).Port + return listener.Addr().(*net.TCPAddr).Port, nil } diff --git a/util/treecache/treecache.go b/util/treecache/treecache.go index bbbaaf3d6e9..4d4b6f544cf 100644 --- a/util/treecache/treecache.go +++ b/util/treecache/treecache.go @@ -17,12 +17,11 @@ import ( "bytes" "errors" "fmt" + "log/slog" "strings" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/go-zookeeper/zk" "github.com/prometheus/client_golang/prometheus" ) @@ -47,19 +46,19 @@ func init() { prometheus.MustRegister(numWatchers) } -// ZookeeperLogger wraps a log.Logger into a zk.Logger. +// ZookeeperLogger wraps a *slog.Logger into a zk.Logger. type ZookeeperLogger struct { - logger log.Logger + logger *slog.Logger } // NewZookeeperLogger is a constructor for ZookeeperLogger. -func NewZookeeperLogger(logger log.Logger) ZookeeperLogger { +func NewZookeeperLogger(logger *slog.Logger) ZookeeperLogger { return ZookeeperLogger{logger: logger} } // Printf implements zk.Logger. func (zl ZookeeperLogger) Printf(s string, i ...interface{}) { - level.Info(zl.logger).Log("msg", fmt.Sprintf(s, i...)) + zl.logger.Info(s, i...) 
} // A ZookeeperTreeCache keeps data from all children of a Zookeeper path @@ -72,7 +71,7 @@ type ZookeeperTreeCache struct { wg *sync.WaitGroup head *zookeeperTreeCacheNode - logger log.Logger + logger *slog.Logger } // A ZookeeperTreeCacheEvent models a Zookeeper event for a path. @@ -90,7 +89,7 @@ type zookeeperTreeCacheNode struct { } // NewZookeeperTreeCache creates a new ZookeeperTreeCache for a given path. -func NewZookeeperTreeCache(conn *zk.Conn, path string, events chan ZookeeperTreeCacheEvent, logger log.Logger) *ZookeeperTreeCache { +func NewZookeeperTreeCache(conn *zk.Conn, path string, events chan ZookeeperTreeCacheEvent, logger *slog.Logger) *ZookeeperTreeCache { tc := &ZookeeperTreeCache{ conn: conn, prefix: path, @@ -144,20 +143,20 @@ func (tc *ZookeeperTreeCache) loop(path string) { err := tc.recursiveNodeUpdate(path, tc.head) if err != nil { - level.Error(tc.logger).Log("msg", "Error during initial read of Zookeeper", "err", err) + tc.logger.Error("Error during initial read of Zookeeper", "err", err) failure() } for { select { case ev := <-tc.head.events: - level.Debug(tc.logger).Log("msg", "Received Zookeeper event", "event", ev) + tc.logger.Debug("Received Zookeeper event", "event", ev) if failureMode { continue } if ev.Type == zk.EventNotWatching { - level.Info(tc.logger).Log("msg", "Lost connection to Zookeeper.") + tc.logger.Info("Lost connection to Zookeeper.") failure() } else { path := strings.TrimPrefix(ev.Path, tc.prefix) @@ -178,15 +177,15 @@ func (tc *ZookeeperTreeCache) loop(path string) { switch err := tc.recursiveNodeUpdate(ev.Path, node); { case err != nil: - level.Error(tc.logger).Log("msg", "Error during processing of Zookeeper event", "err", err) + tc.logger.Error("Error during processing of Zookeeper event", "err", err) failure() case tc.head.data == nil: - level.Error(tc.logger).Log("msg", "Error during processing of Zookeeper event", "err", "path no longer exists", "path", tc.prefix) + tc.logger.Error("Error during processing of Zookeeper event", "err", "path no longer exists", "path", tc.prefix) failure() } } case <-retryChan: - level.Info(tc.logger).Log("msg", "Attempting to resync state with Zookeeper") + tc.logger.Info("Attempting to resync state with Zookeeper") previousState := &zookeeperTreeCacheNode{ children: tc.head.children, } @@ -194,13 +193,13 @@ func (tc *ZookeeperTreeCache) loop(path string) { tc.head.children = make(map[string]*zookeeperTreeCacheNode) if err := tc.recursiveNodeUpdate(tc.prefix, tc.head); err != nil { - level.Error(tc.logger).Log("msg", "Error during Zookeeper resync", "err", err) + tc.logger.Error("Error during Zookeeper resync", "err", err) // Revert to our previous state. tc.head.children = previousState.children failure() } else { tc.resyncState(tc.prefix, tc.head, previousState) - level.Info(tc.logger).Log("msg", "Zookeeper resync successful") + tc.logger.Info("Zookeeper resync successful") failureMode = false } case <-tc.stop: diff --git a/util/zeropool/pool_test.go b/util/zeropool/pool_test.go index fea8200226b..e9793f64d7f 100644 --- a/util/zeropool/pool_test.go +++ b/util/zeropool/pool_test.go @@ -81,7 +81,7 @@ func TestPool(t *testing.T) { t.Run("does not allocate", func(t *testing.T) { pool := zeropool.New(func() []byte { return make([]byte, 1024) }) - // Warm up, this will alloate one slice. + // Warm up, this will allocate one slice. 
slice := pool.Get() pool.Put(slice) diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md index 5584c350b0a..dcdafb0a47f 100644 --- a/vendor/cloud.google.com/go/auth/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/CHANGES.md @@ -1,5 +1,21 @@ # Changelog +## [0.9.5](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.4...auth/v0.9.5) (2024-09-25) + + +### Bug Fixes + +* **auth:** Restore support for GOOGLE_CLOUD_UNIVERSE_DOMAIN env ([#10915](https://github.com/googleapis/google-cloud-go/issues/10915)) ([94caaaa](https://github.com/googleapis/google-cloud-go/commit/94caaaa061362d0e00ef6214afcc8a0a3e7ebfb2)) +* **auth:** Skip directpath credentials overwrite when it's not on GCE ([#10833](https://github.com/googleapis/google-cloud-go/issues/10833)) ([7e5e8d1](https://github.com/googleapis/google-cloud-go/commit/7e5e8d10b761b0a6e43e19a028528db361bc07b1)) +* **auth:** Use new context for non-blocking token refresh ([#10919](https://github.com/googleapis/google-cloud-go/issues/10919)) ([cf7102d](https://github.com/googleapis/google-cloud-go/commit/cf7102d33a21be1e5a9d47a49456b3a57c43b350)) + +## [0.9.4](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.3...auth/v0.9.4) (2024-09-11) + + +### Bug Fixes + +* **auth:** Enable self-signed JWT for non-GDU universe domain ([#10831](https://github.com/googleapis/google-cloud-go/issues/10831)) ([f9869f7](https://github.com/googleapis/google-cloud-go/commit/f9869f7903cfd34d1b97c25d0dc5669d2c5138e6)) + ## [0.9.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.2...auth/v0.9.3) (2024-09-03) diff --git a/vendor/cloud.google.com/go/auth/README.md b/vendor/cloud.google.com/go/auth/README.md index 36de276a074..6fe4f0763e3 100644 --- a/vendor/cloud.google.com/go/auth/README.md +++ b/vendor/cloud.google.com/go/auth/README.md @@ -1,4 +1,40 @@ -# auth +# Google Auth Library for Go -This module is currently EXPERIMENTAL and under active development. It is not -yet intended to be used. +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/auth.svg)](https://pkg.go.dev/cloud.google.com/go/auth) + +## Install + +``` bash +go get cloud.google.com/go/auth@latest +``` + +## Usage + +The most common way this library is used is transitively, by default, from any +of our Go client libraries. + +### Notable use-cases + +- To create a credential directly please see examples in the + [credentials](https://pkg.go.dev/cloud.google.com/go/auth/credentials) + package. +- To create a authenticated HTTP client please see examples in the + [httptransport](https://pkg.go.dev/cloud.google.com/go/auth/httptransport) + package. +- To create a authenticated gRPC connection please see examples in the + [grpctransport](https://pkg.go.dev/cloud.google.com/go/auth/grpctransport) + package. +- To create an ID token please see examples in the + [idtoken](https://pkg.go.dev/cloud.google.com/go/auth/credentials/idtoken) + package. + +## Contributing + +Contributions are welcome. Please, see the +[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. +See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. 
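For context on the vendored library, a sketch under the assumption that credentials.DetectDefault and DetectOptions are used as documented upstream; this is not something the patch adds:

	// Detect default Google credentials with an explicit scope.
	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	if err != nil {
		// handle the error
	}
	_ = creds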
diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go index bc37ea85fb5..32fb058df0c 100644 --- a/vendor/cloud.google.com/go/auth/auth.go +++ b/vendor/cloud.google.com/go/auth/auth.go @@ -328,7 +328,9 @@ func (c *cachedTokenProvider) tokenNonBlocking(ctx context.Context) (*Token, err defer c.mu.Unlock() return c.cachedToken, nil case stale: - c.tokenAsync(ctx) + // Call tokenAsync with a new Context because the user-provided context + // may have a short timeout incompatible with async token refresh. + c.tokenAsync(context.Background()) // Return the stale token immediately to not block customer requests to Cloud services. c.mu.Lock() defer c.mu.Unlock() diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go index cf56b025a23..6591b181132 100644 --- a/vendor/cloud.google.com/go/auth/credentials/filetypes.go +++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go @@ -124,8 +124,14 @@ func resolveUniverseDomain(optsUniverseDomain, fileUniverseDomain string) string } func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) if opts.UseSelfSignedJWT { return configureSelfSignedJWT(f, opts) + } else if ud != "" && ud != internalauth.DefaultUniverseDomain { + // For non-GDU universe domains, token exchange is impossible and services + // must support self-signed JWTs. + opts.UseSelfSignedJWT = true + return configureSelfSignedJWT(f, opts) } opts2LO := &auth.Options2LO{ Email: f.ClientEmail, diff --git a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go index b62a8ae4d5d..6ae29de6c27 100644 --- a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go +++ b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go @@ -17,6 +17,7 @@ package credentials import ( "context" "crypto/rsa" + "errors" "fmt" "strings" "time" @@ -35,6 +36,9 @@ var ( // configureSelfSignedJWT uses the private key in the service account to create // a JWT without making a network call. func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + if len(opts.scopes()) == 0 && opts.Audience == "" { + return nil, errors.New("credentials: both scopes and audience are empty") + } pk, err := internal.ParseKey([]byte(f.PrivateKey)) if err != nil { return nil, fmt.Errorf("credentials: could not parse key: %w", err) diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go index 274bb01254c..93c0b1369fe 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/transport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go @@ -19,6 +19,7 @@ import ( "crypto/tls" "net" "net/http" + "os" "time" "cloud.google.com/go/auth" @@ -178,13 +179,23 @@ type authTransport struct { clientUniverseDomain string } -// getClientUniverseDomain returns the universe domain configured for the client. -// The default value is "googleapis.com". +// getClientUniverseDomain returns the default service domain for a given Cloud +// universe, with the following precedence: +// +// 1. A non-empty option.WithUniverseDomain or similar client option. +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN. +// 3. The default value "googleapis.com". 
+// +// This is the universe domain configured for the client, which will be compared +// to the universe domain that is separately configured for the credentials. func (t *authTransport) getClientUniverseDomain() string { - if t.clientUniverseDomain == "" { - return internal.DefaultUniverseDomain + if t.clientUniverseDomain != "" { + return t.clientUniverseDomain + } + if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" { + return envUD } - return t.clientUniverseDomain + return internal.DefaultUniverseDomain } // RoundTrip authorizes and authenticates the request with an diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go index 4308345eda3..66a51f19c73 100644 --- a/vendor/cloud.google.com/go/auth/internal/internal.go +++ b/vendor/cloud.google.com/go/auth/internal/internal.go @@ -38,8 +38,11 @@ const ( // QuotaProjectEnvVar is the environment variable for setting the quota // project. QuotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT" - projectEnvVar = "GOOGLE_CLOUD_PROJECT" - maxBodySize = 1 << 20 + // UniverseDomainEnvVar is the environment variable for setting the default + // service domain for a given Cloud universe. + UniverseDomainEnvVar = "GOOGLE_CLOUD_UNIVERSE_DOMAIN" + projectEnvVar = "GOOGLE_CLOUD_PROJECT" + maxBodySize = 1 << 20 // DefaultUniverseDomain is the default value for universe domain. // Universe domain is the default service domain for a given Cloud universe. diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 9594e1e2793..da7db19b1c6 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,19 @@ # Changes +## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.1...compute/metadata/v0.5.2) (2024-09-20) + + +### Bug Fixes + +* **compute/metadata:** Close Response Body for failed request ([#10891](https://github.com/googleapis/google-cloud-go/issues/10891)) ([e91d45e](https://github.com/googleapis/google-cloud-go/commit/e91d45e4757a9e354114509ba9800085d9e0ff1f)) + +## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.0...compute/metadata/v0.5.1) (2024-09-12) + + +### Bug Fixes + +* **compute/metadata:** Check error chain for retryable error ([#10840](https://github.com/googleapis/google-cloud-go/issues/10840)) ([2bdedef](https://github.com/googleapis/google-cloud-go/commit/2bdedeff621b223d63cebc4355fcf83bc68412cd)) + ## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.4.0...compute/metadata/v0.5.0) (2024-07-10) diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 345080b7297..c160b4786bb 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -456,6 +456,9 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string code = res.StatusCode } if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry { + if res != nil && res.Body != nil { + res.Body.Close() + } if err := sleep(ctx, delay); err != nil { return "", "", err } diff --git a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go index bb412f8917e..2e53f012300 100644 --- 
a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go +++ b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go @@ -17,10 +17,15 @@ package metadata -import "syscall" +import ( + "errors" + "syscall" +) func init() { // Initialize syscallRetryable to return true on transient socket-level // errors. These errors are specific to Linux. - syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } + syscallRetryable = func(err error) bool { + return errors.Is(err, syscall.ECONNRESET) || errors.Is(err, syscall.ECONNREFUSED) + } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/fake.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/fake.go new file mode 100644 index 00000000000..3dfc74e4bf1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/fake.go @@ -0,0 +1,139 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// Package fake provides the building blocks for fake servers. +// This includes fakes for authentication, API responses, and more. +package fake + +import ( + "context" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" +) + +// TokenCredential is a fake credential that implements the azcore.TokenCredential interface. +type TokenCredential struct { + err error +} + +// SetError sets the specified error to be returned from GetToken(). +// Use this to simulate an error during authentication. +func (t *TokenCredential) SetError(err error) { + t.err = errorinfo.NonRetriableError(err) +} + +// GetToken implements the azcore.TokenCredential for the TokenCredential type. +func (t *TokenCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if t.err != nil { + return azcore.AccessToken{}, errorinfo.NonRetriableError(t.err) + } + return azcore.AccessToken{Token: "fake_token", ExpiresOn: time.Now().Add(24 * time.Hour)}, nil +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// Responder represents a scalar response. +type Responder[T any] exported.Responder[T] + +// SetResponse sets the specified value to be returned. +// - httpStatus is the HTTP status code to be returned +// - resp is the response to be returned +// - o contains optional values, pass nil to accept the defaults +func (r *Responder[T]) SetResponse(httpStatus int, resp T, o *SetResponseOptions) { + (*exported.Responder[T])(r).SetResponse(httpStatus, resp, o) +} + +// SetResponseOptions contains the optional values for Responder[T].SetResponse. +type SetResponseOptions = exported.SetResponseOptions + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// ErrorResponder represents a scalar error response. +type ErrorResponder exported.ErrorResponder + +// SetError sets the specified error to be returned. +// Use SetResponseError for returning an *azcore.ResponseError. +func (e *ErrorResponder) SetError(err error) { + (*exported.ErrorResponder)(e).SetError(err) +} + +// SetResponseError sets an *azcore.ResponseError with the specified values to be returned. 
+// - errorCode is the value to be used in the ResponseError.Code field +// - httpStatus is the HTTP status code +func (e *ErrorResponder) SetResponseError(httpStatus int, errorCode string) { + (*exported.ErrorResponder)(e).SetResponseError(httpStatus, errorCode) +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// PagerResponder represents a sequence of paged responses. +// Responses are consumed in the order in which they were added. +// If no pages or errors have been added, calls to Pager[T].NextPage +// will return an error. +type PagerResponder[T any] exported.PagerResponder[T] + +// AddPage adds a page to the sequence of respones. +// - page is the response page to be added +// - o contains optional values, pass nil to accept the defaults +func (p *PagerResponder[T]) AddPage(httpStatus int, page T, o *AddPageOptions) { + (*exported.PagerResponder[T])(p).AddPage(httpStatus, page, o) +} + +// AddError adds an error to the sequence of responses. +// The error is returned from the call to runtime.Pager[T].NextPage(). +func (p *PagerResponder[T]) AddError(err error) { + (*exported.PagerResponder[T])(p).AddError(err) +} + +// AddResponseError adds an *azcore.ResponseError to the sequence of responses. +// The error is returned from the call to runtime.Pager[T].NextPage(). +func (p *PagerResponder[T]) AddResponseError(httpStatus int, errorCode string) { + (*exported.PagerResponder[T])(p).AddResponseError(httpStatus, errorCode) +} + +// AddPageOptions contains the optional values for PagerResponder[T].AddPage. +type AddPageOptions = exported.AddPageOptions + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// PollerResponder represents a sequence of responses for a long-running operation. +// Any non-terminal responses are consumed in the order in which they were added. +// The terminal response, success or error, is always the final response. +// If no responses or errors have been added, the following method calls on Poller[T] +// will return an error: PollUntilDone, Poll, Result. +type PollerResponder[T any] exported.PollerResponder[T] + +// AddNonTerminalResponse adds a non-terminal response to the sequence of responses. +func (p *PollerResponder[T]) AddNonTerminalResponse(httpStatus int, o *AddNonTerminalResponseOptions) { + (*exported.PollerResponder[T])(p).AddNonTerminalResponse(httpStatus, o) +} + +// AddPollingError adds an error to the sequence of responses. +// Use this to simulate an error durring polling. +// NOTE: adding this as the first response will cause the Begin* LRO API to return this error. +func (p *PollerResponder[T]) AddPollingError(err error) { + (*exported.PollerResponder[T])(p).AddPollingError(err) +} + +// SetTerminalResponse sets the provided value as the successful, terminal response. +func (p *PollerResponder[T]) SetTerminalResponse(httpStatus int, result T, o *SetTerminalResponseOptions) { + (*exported.PollerResponder[T])(p).SetTerminalResponse(httpStatus, result, o) +} + +// SetTerminalError sets an *azcore.ResponseError with the specified values as the failed terminal response. +func (p *PollerResponder[T]) SetTerminalError(httpStatus int, errorCode string) { + (*exported.PollerResponder[T])(p).SetTerminalError(httpStatus, errorCode) +} + +// AddNonTerminalResponseOptions contains the optional values for PollerResponder[T].AddNonTerminalResponse. 
+type AddNonTerminalResponseOptions = exported.AddNonTerminalResponseOptions + +// SetTerminalResponseOptions contains the optional values for PollerResponder[T].SetTerminalResponse. +type SetTerminalResponseOptions = exported.SetTerminalResponseOptions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/internal/exported/fake.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/internal/exported/fake.go new file mode 100644 index 00000000000..2a77cddb895 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/internal/exported/fake.go @@ -0,0 +1,410 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "regexp" + "strconv" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" +) + +// Responder represents a scalar response. +type Responder[T any] struct { + httpStatus int + resp T + opts SetResponseOptions +} + +// SetResponse sets the specified value to be returned. +// - httpStatus is the HTTP status code to be returned +// - resp is the response to be returned +// - o contains optional values, pass nil to accept the defaults +func (r *Responder[T]) SetResponse(httpStatus int, resp T, o *SetResponseOptions) { + r.httpStatus = httpStatus + r.resp = resp + if o != nil { + r.opts = *o + } +} + +// SetResponseOptions contains the optional values for Responder[T].SetResponse. +type SetResponseOptions struct { + // Header contains optional HTTP headers to include in the response. + Header http.Header +} + +// GetResponse returns the response associated with the Responder. +// This function is called by the fake server internals. +func (r Responder[T]) GetResponse() T { + return r.resp +} + +// GetResponseContent returns the ResponseContent associated with the Responder. +// This function is called by the fake server internals. +func (r Responder[T]) GetResponseContent() ResponseContent { + return ResponseContent{HTTPStatus: r.httpStatus, Header: r.opts.Header} +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// ErrorResponder represents a scalar error response. +type ErrorResponder struct { + err error +} + +// SetError sets the specified error to be returned. +// Use SetResponseError for returning an *azcore.ResponseError. +func (e *ErrorResponder) SetError(err error) { + e.err = errorinfo.NonRetriableError(err) +} + +// SetResponseError sets an *azcore.ResponseError with the specified values to be returned. +// - errorCode is the value to be used in the ResponseError.Code field +// - httpStatus is the HTTP status code +func (e *ErrorResponder) SetResponseError(httpStatus int, errorCode string) { + e.err = errorinfo.NonRetriableError(&exported.ResponseError{ErrorCode: errorCode, StatusCode: httpStatus}) +} + +// GetError returns the error for this responder. +// This function is called by the fake server internals. 
+func (e ErrorResponder) GetError(req *http.Request) error { + if e.err == nil { + return nil + } + + var respErr *azcore.ResponseError + if errors.As(e.err, &respErr) { + // fix up the raw response + rawResp, err := newErrorResponse(respErr.StatusCode, respErr.ErrorCode, req) + if err != nil { + return errorinfo.NonRetriableError(err) + } + respErr.RawResponse = rawResp + } + return errorinfo.NonRetriableError(e.err) +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// PagerResponder represents a sequence of paged responses. +// Responses are replayed in the order in which they were added. +type PagerResponder[T any] struct { + pages []any +} + +// AddPage adds a page to the sequence of respones. +// - page is the response page to be added +// - o contains optional values, pass nil to accept the defaults +func (p *PagerResponder[T]) AddPage(httpStatus int, page T, o *AddPageOptions) { + p.pages = append(p.pages, pageResp[T]{httpStatus: httpStatus, entry: page}) +} + +// AddError adds an error to the sequence of responses. +// The error is returned from the call to runtime.Pager[T].NextPage(). +func (p *PagerResponder[T]) AddError(err error) { + p.pages = append(p.pages, errorinfo.NonRetriableError(err)) +} + +// AddResponseError adds an *azcore.ResponseError to the sequence of responses. +// The error is returned from the call to runtime.Pager[T].NextPage(). +func (p *PagerResponder[T]) AddResponseError(httpStatus int, errorCode string) { + p.pages = append(p.pages, errorinfo.NonRetriableError(&exported.ResponseError{ErrorCode: errorCode, StatusCode: httpStatus})) +} + +// AddPageOptions contains the optional values for PagerResponder[T].AddPage. +type AddPageOptions struct { + // placeholder for future options +} + +// Next returns the next response in the sequence (a T or an error). +// This function is called by the fake server internals. +func (p *PagerResponder[T]) Next(req *http.Request) (*http.Response, error) { + if len(p.pages) == 0 { + return nil, errorinfo.NonRetriableError(errors.New("fake paged response is empty")) + } + + page := p.pages[0] + p.pages = p.pages[1:] + + pageT, ok := page.(pageResp[T]) + if ok { + body, err := json.Marshal(pageT.entry) + if err != nil { + return nil, errorinfo.NonRetriableError(err) + } + content := ResponseContent{ + HTTPStatus: pageT.httpStatus, + Header: http.Header{}, + } + resp, err := NewResponse(content, req) + if err != nil { + return nil, errorinfo.NonRetriableError(err) + } + return SetResponseBody(resp, body, shared.ContentTypeAppJSON), nil + } + + err := page.(error) + var respErr *azcore.ResponseError + if errors.As(err, &respErr) { + // fix up the raw response + rawResp, err := newErrorResponse(respErr.StatusCode, respErr.ErrorCode, req) + if err != nil { + return nil, errorinfo.NonRetriableError(err) + } + respErr.RawResponse = rawResp + } + return nil, errorinfo.NonRetriableError(err) +} + +// More returns true if there are more responses for consumption. +// This function is called by the fake server internals. +func (p *PagerResponder[T]) More() bool { + return len(p.pages) > 0 +} + +// nextLinkURLSuffix is the URL path suffix for a faked next page followed by one or more digits. +const nextLinkURLSuffix = "/fake_page_" + +// InjectNextLinks is used to populate the nextLink field. +// The inject callback is executed for every T in the sequence except for the last one. +// This function is called by the fake server internals. 
+func (p *PagerResponder[T]) InjectNextLinks(req *http.Request, inject func(page *T, createLink func() string)) { + // populate the next links, including pageResp[T] where the next + // "page" is an error response. this allows an error response to + // be returned when there are no subsequent pages. + pageNum := 1 + for i := range p.pages { + if i+1 == len(p.pages) { + // no nextLink for last page + break + } + + pageT, ok := p.pages[i].(pageResp[T]) + if !ok { + // error entry, no next link + continue + } + + qp := "" + if req.URL.RawQuery != "" { + qp = "?" + req.URL.RawQuery + } + + inject(&pageT.entry, func() string { + // NOTE: any changes to this path format MUST be reflected in SanitizePagerPath() + return fmt.Sprintf("%s://%s%s%s%d%s", req.URL.Scheme, req.URL.Host, req.URL.Path, nextLinkURLSuffix, pageNum, qp) + }) + pageNum++ + + // update the original slice with the modified page + p.pages[i] = pageT + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// PollerResponder represents a sequence of responses for a long-running operation. +// Any non-terminal responses are replayed in the order in which they were added. +// The terminal response, success or error, is always the final response. +type PollerResponder[T any] struct { + nonTermResps []nonTermResp + httpStatus int + res *T + err *exported.ResponseError +} + +// AddNonTerminalResponse adds a non-terminal response to the sequence of responses. +func (p *PollerResponder[T]) AddNonTerminalResponse(httpStatus int, o *AddNonTerminalResponseOptions) { + p.nonTermResps = append(p.nonTermResps, nonTermResp{httpStatus: httpStatus, status: "InProgress"}) +} + +// AddPollingError adds an error to the sequence of responses. +// Use this to simulate an error durring polling. +// NOTE: adding this as the first response will cause the Begin* LRO API to return this error. +func (p *PollerResponder[T]) AddPollingError(err error) { + p.nonTermResps = append(p.nonTermResps, nonTermResp{err: err}) +} + +// SetTerminalResponse sets the provided value as the successful, terminal response. +func (p *PollerResponder[T]) SetTerminalResponse(httpStatus int, result T, o *SetTerminalResponseOptions) { + p.httpStatus = httpStatus + p.res = &result +} + +// SetTerminalError sets an *azcore.ResponseError with the specified values as the failed terminal response. +func (p *PollerResponder[T]) SetTerminalError(httpStatus int, errorCode string) { + p.err = &exported.ResponseError{ErrorCode: errorCode, StatusCode: httpStatus} +} + +// AddNonTerminalResponseOptions contains the optional values for PollerResponder[T].AddNonTerminalResponse. +type AddNonTerminalResponseOptions struct { + // place holder for future optional values +} + +// SetTerminalResponseOptions contains the optional values for PollerResponder[T].SetTerminalResponse. +type SetTerminalResponseOptions struct { + // place holder for future optional values +} + +// More returns true if there are more responses for consumption. +// This function is called by the fake server internals. +func (p *PollerResponder[T]) More() bool { + return len(p.nonTermResps) > 0 || p.err != nil || p.res != nil +} + +// Next returns the next response in the sequence (a *http.Response or an error). +// This function is called by the fake server internals. 
+func (p *PollerResponder[T]) Next(req *http.Request) (*http.Response, error) { + if len(p.nonTermResps) > 0 { + resp := p.nonTermResps[0] + p.nonTermResps = p.nonTermResps[1:] + + if resp.err != nil { + return nil, errorinfo.NonRetriableError(resp.err) + } + + content := ResponseContent{ + HTTPStatus: resp.httpStatus, + Header: http.Header{}, + } + httpResp, err := NewResponse(content, req) + if err != nil { + return nil, errorinfo.NonRetriableError(err) + } + httpResp.Header.Set(shared.HeaderFakePollerStatus, resp.status) + + if resp.retryAfter > 0 { + httpResp.Header.Add(shared.HeaderRetryAfter, strconv.Itoa(resp.retryAfter)) + } + + return httpResp, nil + } + + if p.err != nil { + respErr := p.err + rawResp, err := newErrorResponse(p.err.StatusCode, p.err.ErrorCode, req) + if err != nil { + return nil, errorinfo.NonRetriableError(err) + } + respErr.RawResponse = rawResp + p.err = nil + return nil, errorinfo.NonRetriableError(respErr) + } else if p.res != nil { + body, err := json.Marshal(*p.res) + if err != nil { + return nil, errorinfo.NonRetriableError(err) + } + p.res = nil + content := ResponseContent{ + HTTPStatus: p.httpStatus, + Header: http.Header{}, + } + resp, err := NewResponse(content, req) + if err != nil { + return nil, errorinfo.NonRetriableError(err) + } + httpResp := SetResponseBody(resp, body, shared.ContentTypeAppJSON) + httpResp.Header.Set(shared.HeaderFakePollerStatus, "Succeeded") + return httpResp, nil + } else { + return nil, errorinfo.NonRetriableError(errors.New("fake poller response is emtpy")) + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// ResponseContent is used when building the *http.Response. +// This type is used by the fake server internals. +type ResponseContent struct { + // HTTPStatus is the HTTP status code to use in the response. + HTTPStatus int + + // Header contains the headers from SetResponseOptions.Header to include in the HTTP response. + Header http.Header +} + +// ResponseOptions contains the optional values for NewResponse(). +type ResponseOptions struct { + // Body is the HTTP response body. + Body io.ReadCloser + + // ContentType is the value for the Content-Type HTTP header. + ContentType string +} + +type pageResp[T any] struct { + httpStatus int + entry T +} + +type nonTermResp struct { + httpStatus int + status string + retryAfter int + err error +} + +// SetResponseBody wraps body in a nop-closing bytes reader and assigned it to resp.Body. +// The Content-Type header will be added with the specified value. +func SetResponseBody(resp *http.Response, body []byte, contentType string) *http.Response { + if l := int64(len(body)); l > 0 { + resp.Header.Set(shared.HeaderContentType, contentType) + resp.ContentLength = l + resp.Body = io.NopCloser(bytes.NewReader(body)) + } + return resp +} + +// NewResponse creates a new *http.Response with the specified content and req as the response's request. 
+func NewResponse(content ResponseContent, req *http.Request) (*http.Response, error) { + if content.HTTPStatus == 0 { + return nil, errors.New("fake: no HTTP status code was specified") + } else if content.Header == nil { + content.Header = http.Header{} + } + return &http.Response{ + Body: http.NoBody, + Header: content.Header, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Request: req, + Status: fmt.Sprintf("%d %s", content.HTTPStatus, http.StatusText(content.HTTPStatus)), + StatusCode: content.HTTPStatus, + }, nil +} + +var pageSuffixRegex = regexp.MustCompile(nextLinkURLSuffix + `\d+$`) + +// SanitizePagerPath removes any fake-appended suffix from a URL's path. +func SanitizePagerPath(path string) string { + return pageSuffixRegex.ReplaceAllLiteralString(path, "") +} + +func newErrorResponse(statusCode int, errorCode string, req *http.Request) (*http.Response, error) { + content := ResponseContent{ + HTTPStatus: statusCode, + Header: http.Header{}, + } + resp, err := NewResponse(content, req) + if err != nil { + return nil, err + } + resp.Header.Set(shared.HeaderXMSErrorCode, errorCode) + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server/server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server/server.go new file mode 100644 index 00000000000..827da555420 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server/server.go @@ -0,0 +1,231 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// Package server provides runtime functionality for fake servers. +// Application code won't need to import this package. +package server + +import ( + "encoding/json" + "encoding/xml" + "io" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/internal/exported" + azexported "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + fakepoller "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" +) + +// ResponseContent is used when building the *http.Response. +// This type is used by the fake server internals. +type ResponseContent = exported.ResponseContent + +// ResponseOptions contains the optional values for NewResponse(). +type ResponseOptions = exported.ResponseOptions + +// NewResponse returns a *http.Response. +// This function is called by the fake server internals. +func NewResponse(content ResponseContent, req *http.Request, opts *ResponseOptions) (*http.Response, error) { + resp, err := exported.NewResponse(content, req) + if err != nil { + return nil, err + } + if opts != nil { + if opts.Body != nil { + resp.Body = opts.Body + } + if opts.ContentType != "" { + resp.Header.Set(shared.HeaderContentType, opts.ContentType) + } + } + return resp, nil +} + +// MarshalResponseAsByteArray base-64 encodes the body with the specified format and returns it in a *http.Response. +// This function is called by the fake server internals. 
+func MarshalResponseAsByteArray(content ResponseContent, body []byte, format azexported.Base64Encoding, req *http.Request) (*http.Response, error) { + resp, err := exported.NewResponse(content, req) + if err != nil { + return nil, err + } + if body != nil { + resp = exported.SetResponseBody(resp, []byte(azexported.EncodeByteArray(body, format)), shared.ContentTypeAppJSON) + } + return resp, nil +} + +// MarshalResponseAsJSON converts the body into JSON and returns it in a *http.Response. +// This function is called by the fake server internals. +func MarshalResponseAsJSON(content ResponseContent, v any, req *http.Request) (*http.Response, error) { + body, err := json.Marshal(v) + if err != nil { + return nil, errorinfo.NonRetriableError(err) + } + resp, err := exported.NewResponse(content, req) + if err != nil { + return nil, err + } + resp = exported.SetResponseBody(resp, body, shared.ContentTypeAppJSON) + return resp, nil +} + +// MarshalResponseAsText converts the body into text and returns it in a *http.Response. +// This function is called by the fake server internals. +func MarshalResponseAsText(content ResponseContent, body *string, req *http.Request) (*http.Response, error) { + resp, err := exported.NewResponse(content, req) + if err != nil { + return nil, err + } + var bodyAsBytes []byte + if body != nil { + bodyAsBytes = []byte(*body) + } + resp = exported.SetResponseBody(resp, bodyAsBytes, shared.ContentTypeTextPlain) + return resp, nil +} + +// MarshalResponseAsXML converts the body into XML and returns it in a *http.Response. +// This function is called by the fake server internals. +func MarshalResponseAsXML(content ResponseContent, v any, req *http.Request) (*http.Response, error) { + body, err := xml.Marshal(v) + if err != nil { + return nil, errorinfo.NonRetriableError(err) + } + resp, err := exported.NewResponse(content, req) + if err != nil { + return nil, err + } + resp = exported.SetResponseBody(resp, body, shared.ContentTypeAppXML) + return resp, nil +} + +// UnmarshalRequestAsByteArray base-64 decodes the body in the specified format. +// This function is called by the fake server internals. +func UnmarshalRequestAsByteArray(req *http.Request, format azexported.Base64Encoding) ([]byte, error) { + if req.Body == nil { + return nil, nil + } + body, err := io.ReadAll(req.Body) + if err != nil { + return nil, errorinfo.NonRetriableError(err) + } + req.Body.Close() + var val []byte + if err := azexported.DecodeByteArray(string(body), &val, format); err != nil { + return nil, errorinfo.NonRetriableError(err) + } + return val, nil +} + +// UnmarshalRequestAsJSON unmarshalls the request body into an instance of T. +// This function is called by the fake server internals. +func UnmarshalRequestAsJSON[T any](req *http.Request) (T, error) { + tt := *new(T) + if req.Body == nil { + return tt, nil + } + body, err := io.ReadAll(req.Body) + if err != nil { + return tt, errorinfo.NonRetriableError(err) + } + req.Body.Close() + if err = json.Unmarshal(body, &tt); err != nil { + err = errorinfo.NonRetriableError(err) + } + return tt, err +} + +// UnmarshalRequestAsText unmarshalls the request body into a string. +// This function is called by the fake server internals. 
+func UnmarshalRequestAsText(req *http.Request) (string, error) { + if req.Body == nil { + return "", nil + } + body, err := io.ReadAll(req.Body) + if err != nil { + return "", errorinfo.NonRetriableError(err) + } + req.Body.Close() + return string(body), nil +} + +// UnmarshalRequestAsXML unmarshalls the request body into an instance of T. +// This function is called by the fake server internals. +func UnmarshalRequestAsXML[T any](req *http.Request) (T, error) { + tt := *new(T) + if req.Body == nil { + return tt, nil + } + body, err := io.ReadAll(req.Body) + if err != nil { + return tt, errorinfo.NonRetriableError(err) + } + req.Body.Close() + if err = xml.Unmarshal(body, &tt); err != nil { + err = errorinfo.NonRetriableError(err) + } + return tt, err +} + +// GetResponse returns the response associated with the Responder. +// This function is called by the fake server internals. +func GetResponse[T any](r fake.Responder[T]) T { + return exported.Responder[T](r).GetResponse() +} + +// GetResponseContent returns the ResponseContent associated with the Responder. +// This function is called by the fake server internals. +func GetResponseContent[T any](r fake.Responder[T]) ResponseContent { + return exported.Responder[T](r).GetResponseContent() +} + +// GetError returns the error for this responder. +// This function is called by the fake server internals. +func GetError(e fake.ErrorResponder, req *http.Request) error { + return exported.ErrorResponder(e).GetError(req) +} + +// PagerResponderNext returns the next response in the sequence (a T or an error). +// This function is called by the fake server internals. +func PagerResponderNext[T any](p *fake.PagerResponder[T], req *http.Request) (*http.Response, error) { + return (*exported.PagerResponder[T])(p).Next(req) +} + +// PagerResponderMore returns true if there are more responses for consumption. +// This function is called by the fake server internals. +func PagerResponderMore[T any](p *fake.PagerResponder[T]) bool { + return (*exported.PagerResponder[T])(p).More() +} + +// PagerResponderInjectNextLinks is used to populate the nextLink field. +// The inject callback is executed for every T in the sequence except for the last one. +// This function is called by the fake server internals. +func PagerResponderInjectNextLinks[T any](p *fake.PagerResponder[T], req *http.Request, inject func(page *T, createLink func() string)) { + (*exported.PagerResponder[T])(p).InjectNextLinks(req, inject) +} + +// PollerResponderMore returns true if there are more responses for consumption. +// This function is called by the fake server internals. +func PollerResponderMore[T any](p *fake.PollerResponder[T]) bool { + return (*exported.PollerResponder[T])(p).More() +} + +// PollerResponderNext returns the next response in the sequence (a *http.Response or an error). +// This function is called by the fake server internals. +func PollerResponderNext[T any](p *fake.PollerResponder[T], req *http.Request) (*http.Response, error) { + return (*exported.PollerResponder[T])(p).Next(req) +} + +// SanitizePagerPollerPath removes any fake-appended suffix from a URL's path. +// This function is called by the fake server internals. 
+func SanitizePagerPollerPath(path string) string { + path = exported.SanitizePagerPath(path) + path = fakepoller.SanitizePollerPath(path) + return path +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/availabilitysets_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/availabilitysets_server.go new file mode 100644 index 00000000000..52fc94350dc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/availabilitysets_server.go @@ -0,0 +1,372 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// AvailabilitySetsServer is a fake server for instances of the armcompute.AvailabilitySetsClient type. +type AvailabilitySetsServer struct { + // CreateOrUpdate is the fake for method AvailabilitySetsClient.CreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK + CreateOrUpdate func(ctx context.Context, resourceGroupName string, availabilitySetName string, parameters armcompute.AvailabilitySet, options *armcompute.AvailabilitySetsClientCreateOrUpdateOptions) (resp azfake.Responder[armcompute.AvailabilitySetsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // Delete is the fake for method AvailabilitySetsClient.Delete + // HTTP status codes to indicate success: http.StatusOK, http.StatusNoContent + Delete func(ctx context.Context, resourceGroupName string, availabilitySetName string, options *armcompute.AvailabilitySetsClientDeleteOptions) (resp azfake.Responder[armcompute.AvailabilitySetsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method AvailabilitySetsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, availabilitySetName string, options *armcompute.AvailabilitySetsClientGetOptions) (resp azfake.Responder[armcompute.AvailabilitySetsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method AvailabilitySetsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armcompute.AvailabilitySetsClientListOptions) (resp azfake.PagerResponder[armcompute.AvailabilitySetsClientListResponse]) + + // NewListAvailableSizesPager is the fake for method AvailabilitySetsClient.NewListAvailableSizesPager + // HTTP status codes to indicate success: http.StatusOK + NewListAvailableSizesPager func(resourceGroupName string, availabilitySetName string, options *armcompute.AvailabilitySetsClientListAvailableSizesOptions) (resp azfake.PagerResponder[armcompute.AvailabilitySetsClientListAvailableSizesResponse]) + + // NewListBySubscriptionPager is the fake for method 
AvailabilitySetsClient.NewListBySubscriptionPager + // HTTP status codes to indicate success: http.StatusOK + NewListBySubscriptionPager func(options *armcompute.AvailabilitySetsClientListBySubscriptionOptions) (resp azfake.PagerResponder[armcompute.AvailabilitySetsClientListBySubscriptionResponse]) + + // Update is the fake for method AvailabilitySetsClient.Update + // HTTP status codes to indicate success: http.StatusOK + Update func(ctx context.Context, resourceGroupName string, availabilitySetName string, parameters armcompute.AvailabilitySetUpdate, options *armcompute.AvailabilitySetsClientUpdateOptions) (resp azfake.Responder[armcompute.AvailabilitySetsClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewAvailabilitySetsServerTransport creates a new instance of AvailabilitySetsServerTransport with the provided implementation. +// The returned AvailabilitySetsServerTransport instance is connected to an instance of armcompute.AvailabilitySetsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewAvailabilitySetsServerTransport(srv *AvailabilitySetsServer) *AvailabilitySetsServerTransport { + return &AvailabilitySetsServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armcompute.AvailabilitySetsClientListResponse]](), + newListAvailableSizesPager: newTracker[azfake.PagerResponder[armcompute.AvailabilitySetsClientListAvailableSizesResponse]](), + newListBySubscriptionPager: newTracker[azfake.PagerResponder[armcompute.AvailabilitySetsClientListBySubscriptionResponse]](), + } +} + +// AvailabilitySetsServerTransport connects instances of armcompute.AvailabilitySetsClient to instances of AvailabilitySetsServer. +// Don't use this type directly, use NewAvailabilitySetsServerTransport instead. +type AvailabilitySetsServerTransport struct { + srv *AvailabilitySetsServer + newListPager *tracker[azfake.PagerResponder[armcompute.AvailabilitySetsClientListResponse]] + newListAvailableSizesPager *tracker[azfake.PagerResponder[armcompute.AvailabilitySetsClientListAvailableSizesResponse]] + newListBySubscriptionPager *tracker[azfake.PagerResponder[armcompute.AvailabilitySetsClientListBySubscriptionResponse]] +} + +// Do implements the policy.Transporter interface for AvailabilitySetsServerTransport. 
+func (a *AvailabilitySetsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "AvailabilitySetsClient.CreateOrUpdate": + resp, err = a.dispatchCreateOrUpdate(req) + case "AvailabilitySetsClient.Delete": + resp, err = a.dispatchDelete(req) + case "AvailabilitySetsClient.Get": + resp, err = a.dispatchGet(req) + case "AvailabilitySetsClient.NewListPager": + resp, err = a.dispatchNewListPager(req) + case "AvailabilitySetsClient.NewListAvailableSizesPager": + resp, err = a.dispatchNewListAvailableSizesPager(req) + case "AvailabilitySetsClient.NewListBySubscriptionPager": + resp, err = a.dispatchNewListBySubscriptionPager(req) + case "AvailabilitySetsClient.Update": + resp, err = a.dispatchUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (a *AvailabilitySetsServerTransport) dispatchCreateOrUpdate(req *http.Request) (*http.Response, error) { + if a.srv.CreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method CreateOrUpdate not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/availabilitySets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.AvailabilitySet](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + availabilitySetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("availabilitySetName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.CreateOrUpdate(req.Context(), resourceGroupNameParam, availabilitySetNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).AvailabilitySet, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (a *AvailabilitySetsServerTransport) dispatchDelete(req *http.Request) (*http.Response, error) { + if a.srv.Delete == nil { + return nil, &nonRetriableError{errors.New("fake for method Delete not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/availabilitySets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + availabilitySetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("availabilitySetName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.Delete(req.Context(), resourceGroupNameParam, availabilitySetNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusNoContent}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusNoContent", respContent.HTTPStatus)} + } + resp, err := server.NewResponse(respContent, req, nil) + if err != nil { + return nil, err + } + return resp, nil +} + +func (a *AvailabilitySetsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if a.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/availabilitySets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + availabilitySetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("availabilitySetName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.Get(req.Context(), resourceGroupNameParam, availabilitySetNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).AvailabilitySet, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (a *AvailabilitySetsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if a.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := a.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/availabilitySets` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := a.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + a.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armcompute.AvailabilitySetsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + a.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + a.newListPager.remove(req) + } + return resp, nil +} + +func (a *AvailabilitySetsServerTransport) dispatchNewListAvailableSizesPager(req *http.Request) (*http.Response, error) { + if a.srv.NewListAvailableSizesPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAvailableSizesPager not implemented")} + } + newListAvailableSizesPager := a.newListAvailableSizesPager.get(req) + if newListAvailableSizesPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/availabilitySets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/vmSizes` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + availabilitySetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("availabilitySetName")]) + if err != nil { + return nil, err + } + resp := a.srv.NewListAvailableSizesPager(resourceGroupNameParam, availabilitySetNameParam, nil) + newListAvailableSizesPager = &resp + a.newListAvailableSizesPager.add(req, newListAvailableSizesPager) + } + resp, err := server.PagerResponderNext(newListAvailableSizesPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + a.newListAvailableSizesPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAvailableSizesPager) { + a.newListAvailableSizesPager.remove(req) + } + return resp, nil +} + +func (a *AvailabilitySetsServerTransport) dispatchNewListBySubscriptionPager(req *http.Request) (*http.Response, error) { + if a.srv.NewListBySubscriptionPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListBySubscriptionPager not implemented")} + } + newListBySubscriptionPager := a.newListBySubscriptionPager.get(req) + if newListBySubscriptionPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/availabilitySets` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armcompute.AvailabilitySetsClientListBySubscriptionOptions + if expandParam != nil { + options = &armcompute.AvailabilitySetsClientListBySubscriptionOptions{ + Expand: expandParam, + } + } + resp := a.srv.NewListBySubscriptionPager(options) + newListBySubscriptionPager = &resp + a.newListBySubscriptionPager.add(req, newListBySubscriptionPager) + server.PagerResponderInjectNextLinks(newListBySubscriptionPager, req, func(page *armcompute.AvailabilitySetsClientListBySubscriptionResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListBySubscriptionPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + a.newListBySubscriptionPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListBySubscriptionPager) { + a.newListBySubscriptionPager.remove(req) + } + return resp, nil +} + +func (a *AvailabilitySetsServerTransport) dispatchUpdate(req *http.Request) (*http.Response, error) { + if a.srv.Update == nil { + return nil, &nonRetriableError{errors.New("fake for method Update not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/availabilitySets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.AvailabilitySetUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + availabilitySetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("availabilitySetName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.Update(req.Context(), resourceGroupNameParam, availabilitySetNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).AvailabilitySet, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/capacityreservationgroups_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/capacityreservationgroups_server.go new file mode 100644 index 00000000000..04b97ddbe35 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/capacityreservationgroups_server.go @@ -0,0 +1,356 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// CapacityReservationGroupsServer is a fake server for instances of the armcompute.CapacityReservationGroupsClient type. 
+type CapacityReservationGroupsServer struct { + // CreateOrUpdate is the fake for method CapacityReservationGroupsClient.CreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + CreateOrUpdate func(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, parameters armcompute.CapacityReservationGroup, options *armcompute.CapacityReservationGroupsClientCreateOrUpdateOptions) (resp azfake.Responder[armcompute.CapacityReservationGroupsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // Delete is the fake for method CapacityReservationGroupsClient.Delete + // HTTP status codes to indicate success: http.StatusOK, http.StatusNoContent + Delete func(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, options *armcompute.CapacityReservationGroupsClientDeleteOptions) (resp azfake.Responder[armcompute.CapacityReservationGroupsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method CapacityReservationGroupsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, options *armcompute.CapacityReservationGroupsClientGetOptions) (resp azfake.Responder[armcompute.CapacityReservationGroupsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListByResourceGroupPager is the fake for method CapacityReservationGroupsClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armcompute.CapacityReservationGroupsClientListByResourceGroupOptions) (resp azfake.PagerResponder[armcompute.CapacityReservationGroupsClientListByResourceGroupResponse]) + + // NewListBySubscriptionPager is the fake for method CapacityReservationGroupsClient.NewListBySubscriptionPager + // HTTP status codes to indicate success: http.StatusOK + NewListBySubscriptionPager func(options *armcompute.CapacityReservationGroupsClientListBySubscriptionOptions) (resp azfake.PagerResponder[armcompute.CapacityReservationGroupsClientListBySubscriptionResponse]) + + // Update is the fake for method CapacityReservationGroupsClient.Update + // HTTP status codes to indicate success: http.StatusOK + Update func(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, parameters armcompute.CapacityReservationGroupUpdate, options *armcompute.CapacityReservationGroupsClientUpdateOptions) (resp azfake.Responder[armcompute.CapacityReservationGroupsClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewCapacityReservationGroupsServerTransport creates a new instance of CapacityReservationGroupsServerTransport with the provided implementation. +// The returned CapacityReservationGroupsServerTransport instance is connected to an instance of armcompute.CapacityReservationGroupsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
+func NewCapacityReservationGroupsServerTransport(srv *CapacityReservationGroupsServer) *CapacityReservationGroupsServerTransport { + return &CapacityReservationGroupsServerTransport{ + srv: srv, + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armcompute.CapacityReservationGroupsClientListByResourceGroupResponse]](), + newListBySubscriptionPager: newTracker[azfake.PagerResponder[armcompute.CapacityReservationGroupsClientListBySubscriptionResponse]](), + } +} + +// CapacityReservationGroupsServerTransport connects instances of armcompute.CapacityReservationGroupsClient to instances of CapacityReservationGroupsServer. +// Don't use this type directly, use NewCapacityReservationGroupsServerTransport instead. +type CapacityReservationGroupsServerTransport struct { + srv *CapacityReservationGroupsServer + newListByResourceGroupPager *tracker[azfake.PagerResponder[armcompute.CapacityReservationGroupsClientListByResourceGroupResponse]] + newListBySubscriptionPager *tracker[azfake.PagerResponder[armcompute.CapacityReservationGroupsClientListBySubscriptionResponse]] +} + +// Do implements the policy.Transporter interface for CapacityReservationGroupsServerTransport. +func (c *CapacityReservationGroupsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "CapacityReservationGroupsClient.CreateOrUpdate": + resp, err = c.dispatchCreateOrUpdate(req) + case "CapacityReservationGroupsClient.Delete": + resp, err = c.dispatchDelete(req) + case "CapacityReservationGroupsClient.Get": + resp, err = c.dispatchGet(req) + case "CapacityReservationGroupsClient.NewListByResourceGroupPager": + resp, err = c.dispatchNewListByResourceGroupPager(req) + case "CapacityReservationGroupsClient.NewListBySubscriptionPager": + resp, err = c.dispatchNewListBySubscriptionPager(req) + case "CapacityReservationGroupsClient.Update": + resp, err = c.dispatchUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (c *CapacityReservationGroupsServerTransport) dispatchCreateOrUpdate(req *http.Request) (*http.Response, error) { + if c.srv.CreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method CreateOrUpdate not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/capacityReservationGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.CapacityReservationGroup](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + capacityReservationGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("capacityReservationGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.CreateOrUpdate(req.Context(), resourceGroupNameParam, capacityReservationGroupNameParam, body, nil) + if respErr := 
server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusCreated}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).CapacityReservationGroup, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *CapacityReservationGroupsServerTransport) dispatchDelete(req *http.Request) (*http.Response, error) { + if c.srv.Delete == nil { + return nil, &nonRetriableError{errors.New("fake for method Delete not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/capacityReservationGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + capacityReservationGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("capacityReservationGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.Delete(req.Context(), resourceGroupNameParam, capacityReservationGroupNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusNoContent}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusNoContent", respContent.HTTPStatus)} + } + resp, err := server.NewResponse(respContent, req, nil) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *CapacityReservationGroupsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if c.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/capacityReservationGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + capacityReservationGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("capacityReservationGroupName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(armcompute.CapacityReservationGroupInstanceViewTypes(expandUnescaped)) + var options *armcompute.CapacityReservationGroupsClientGetOptions + if expandParam != nil { + options = &armcompute.CapacityReservationGroupsClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := c.srv.Get(req.Context(), resourceGroupNameParam, capacityReservationGroupNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).CapacityReservationGroup, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *CapacityReservationGroupsServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if c.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := c.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/capacityReservationGroups` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(armcompute.ExpandTypesForGetCapacityReservationGroups(expandUnescaped)) + var options *armcompute.CapacityReservationGroupsClientListByResourceGroupOptions + if expandParam != nil { + options = &armcompute.CapacityReservationGroupsClientListByResourceGroupOptions{ + Expand: expandParam, + } + } + resp := c.srv.NewListByResourceGroupPager(resourceGroupNameParam, options) + newListByResourceGroupPager = &resp + c.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armcompute.CapacityReservationGroupsClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + c.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + c.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (c *CapacityReservationGroupsServerTransport) dispatchNewListBySubscriptionPager(req *http.Request) (*http.Response, error) { + if c.srv.NewListBySubscriptionPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListBySubscriptionPager not implemented")} + } + newListBySubscriptionPager := c.newListBySubscriptionPager.get(req) + if newListBySubscriptionPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/capacityReservationGroups` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(armcompute.ExpandTypesForGetCapacityReservationGroups(expandUnescaped)) + resourceIDsOnlyUnescaped, err := url.QueryUnescape(qp.Get("resourceIdsOnly")) + if err != nil { + return nil, err + } + resourceIDsOnlyParam := getOptional(armcompute.ResourceIDOptionsForGetCapacityReservationGroups(resourceIDsOnlyUnescaped)) + var options *armcompute.CapacityReservationGroupsClientListBySubscriptionOptions + if expandParam != nil || resourceIDsOnlyParam != nil { + options = &armcompute.CapacityReservationGroupsClientListBySubscriptionOptions{ + Expand: expandParam, + ResourceIDsOnly: resourceIDsOnlyParam, + } + } + resp := c.srv.NewListBySubscriptionPager(options) + newListBySubscriptionPager = &resp + c.newListBySubscriptionPager.add(req, newListBySubscriptionPager) + server.PagerResponderInjectNextLinks(newListBySubscriptionPager, req, func(page *armcompute.CapacityReservationGroupsClientListBySubscriptionResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListBySubscriptionPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + c.newListBySubscriptionPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListBySubscriptionPager) { + c.newListBySubscriptionPager.remove(req) + } + return resp, nil +} + +func (c *CapacityReservationGroupsServerTransport) dispatchUpdate(req *http.Request) (*http.Response, error) { + if c.srv.Update == nil { + return nil, &nonRetriableError{errors.New("fake for method Update not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/capacityReservationGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.CapacityReservationGroupUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + capacityReservationGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("capacityReservationGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.Update(req.Context(), resourceGroupNameParam, capacityReservationGroupNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).CapacityReservationGroup, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/capacityreservations_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/capacityreservations_server.go new file mode 100644 index 00000000000..291c5d89cd9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/capacityreservations_server.go @@ -0,0 +1,344 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// CapacityReservationsServer is a fake server for instances of the armcompute.CapacityReservationsClient type. 
+type CapacityReservationsServer struct { + // BeginCreateOrUpdate is the fake for method CapacityReservationsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters armcompute.CapacityReservation, options *armcompute.CapacityReservationsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armcompute.CapacityReservationsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method CapacityReservationsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, options *armcompute.CapacityReservationsClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.CapacityReservationsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method CapacityReservationsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, options *armcompute.CapacityReservationsClientGetOptions) (resp azfake.Responder[armcompute.CapacityReservationsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListByCapacityReservationGroupPager is the fake for method CapacityReservationsClient.NewListByCapacityReservationGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByCapacityReservationGroupPager func(resourceGroupName string, capacityReservationGroupName string, options *armcompute.CapacityReservationsClientListByCapacityReservationGroupOptions) (resp azfake.PagerResponder[armcompute.CapacityReservationsClientListByCapacityReservationGroupResponse]) + + // BeginUpdate is the fake for method CapacityReservationsClient.BeginUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginUpdate func(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters armcompute.CapacityReservationUpdate, options *armcompute.CapacityReservationsClientBeginUpdateOptions) (resp azfake.PollerResponder[armcompute.CapacityReservationsClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewCapacityReservationsServerTransport creates a new instance of CapacityReservationsServerTransport with the provided implementation. +// The returned CapacityReservationsServerTransport instance is connected to an instance of armcompute.CapacityReservationsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
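// Usage sketch (illustration only, not part of the generated file): long-running operations such
// as BeginDelete are faked with an azfake.PollerResponder; the transport replays any queued
// non-terminal responses before the terminal one, so the SDK poller behaves as it would against
// the live service. The AddNonTerminalResponse and SetTerminalResponse helper names are assumed
// from the vendored azcore/fake version.
//
//	srv := fake.CapacityReservationsServer{
//		BeginDelete: func(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, options *armcompute.CapacityReservationsClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.CapacityReservationsClientDeleteResponse], errResp azfake.ErrorResponder) {
//			resp.AddNonTerminalResponse(http.StatusAccepted, nil) // first poll: deletion still in progress
//			resp.SetTerminalResponse(http.StatusOK, armcompute.CapacityReservationsClientDeleteResponse{}, nil) // final poll: done
//			return
//		},
//	}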
+func NewCapacityReservationsServerTransport(srv *CapacityReservationsServer) *CapacityReservationsServerTransport { + return &CapacityReservationsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armcompute.CapacityReservationsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armcompute.CapacityReservationsClientDeleteResponse]](), + newListByCapacityReservationGroupPager: newTracker[azfake.PagerResponder[armcompute.CapacityReservationsClientListByCapacityReservationGroupResponse]](), + beginUpdate: newTracker[azfake.PollerResponder[armcompute.CapacityReservationsClientUpdateResponse]](), + } +} + +// CapacityReservationsServerTransport connects instances of armcompute.CapacityReservationsClient to instances of CapacityReservationsServer. +// Don't use this type directly, use NewCapacityReservationsServerTransport instead. +type CapacityReservationsServerTransport struct { + srv *CapacityReservationsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armcompute.CapacityReservationsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armcompute.CapacityReservationsClientDeleteResponse]] + newListByCapacityReservationGroupPager *tracker[azfake.PagerResponder[armcompute.CapacityReservationsClientListByCapacityReservationGroupResponse]] + beginUpdate *tracker[azfake.PollerResponder[armcompute.CapacityReservationsClientUpdateResponse]] +} + +// Do implements the policy.Transporter interface for CapacityReservationsServerTransport. +func (c *CapacityReservationsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "CapacityReservationsClient.BeginCreateOrUpdate": + resp, err = c.dispatchBeginCreateOrUpdate(req) + case "CapacityReservationsClient.BeginDelete": + resp, err = c.dispatchBeginDelete(req) + case "CapacityReservationsClient.Get": + resp, err = c.dispatchGet(req) + case "CapacityReservationsClient.NewListByCapacityReservationGroupPager": + resp, err = c.dispatchNewListByCapacityReservationGroupPager(req) + case "CapacityReservationsClient.BeginUpdate": + resp, err = c.dispatchBeginUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (c *CapacityReservationsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if c.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := c.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/capacityReservationGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/capacityReservations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.CapacityReservation](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := 
url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + capacityReservationGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("capacityReservationGroupName")]) + if err != nil { + return nil, err + } + capacityReservationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("capacityReservationName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, capacityReservationGroupNameParam, capacityReservationNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + c.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + c.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + c.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (c *CapacityReservationsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if c.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := c.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/capacityReservationGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/capacityReservations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + capacityReservationGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("capacityReservationGroupName")]) + if err != nil { + return nil, err + } + capacityReservationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("capacityReservationName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.BeginDelete(req.Context(), resourceGroupNameParam, capacityReservationGroupNameParam, capacityReservationNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + c.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + c.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + c.beginDelete.remove(req) + } + + return resp, nil +} + +func (c *CapacityReservationsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if c.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/capacityReservationGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/capacityReservations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + capacityReservationGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("capacityReservationGroupName")]) + if err != nil { + return nil, err + } + capacityReservationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("capacityReservationName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(armcompute.CapacityReservationInstanceViewTypes(expandUnescaped)) + var options *armcompute.CapacityReservationsClientGetOptions + if expandParam != nil { + options = &armcompute.CapacityReservationsClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := c.srv.Get(req.Context(), resourceGroupNameParam, capacityReservationGroupNameParam, capacityReservationNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).CapacityReservation, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *CapacityReservationsServerTransport) dispatchNewListByCapacityReservationGroupPager(req *http.Request) (*http.Response, error) { + if c.srv.NewListByCapacityReservationGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByCapacityReservationGroupPager not implemented")} + } + newListByCapacityReservationGroupPager := c.newListByCapacityReservationGroupPager.get(req) + if newListByCapacityReservationGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/capacityReservationGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/capacityReservations` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + capacityReservationGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("capacityReservationGroupName")]) + if err != nil { + return nil, err + } + resp := c.srv.NewListByCapacityReservationGroupPager(resourceGroupNameParam, capacityReservationGroupNameParam, nil) + newListByCapacityReservationGroupPager = &resp + c.newListByCapacityReservationGroupPager.add(req, newListByCapacityReservationGroupPager) + server.PagerResponderInjectNextLinks(newListByCapacityReservationGroupPager, req, func(page *armcompute.CapacityReservationsClientListByCapacityReservationGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByCapacityReservationGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + c.newListByCapacityReservationGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByCapacityReservationGroupPager) { + c.newListByCapacityReservationGroupPager.remove(req) + } + return resp, nil +} + +func (c *CapacityReservationsServerTransport) dispatchBeginUpdate(req *http.Request) (*http.Response, error) { + if c.srv.BeginUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdate not implemented")} + } + beginUpdate := c.beginUpdate.get(req) + if beginUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/capacityReservationGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/capacityReservations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.CapacityReservationUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + capacityReservationGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("capacityReservationGroupName")]) + if err != nil { + return nil, err + } + capacityReservationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("capacityReservationName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.BeginUpdate(req.Context(), resourceGroupNameParam, capacityReservationGroupNameParam, capacityReservationNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdate = &respr + c.beginUpdate.add(req, beginUpdate) + } + + resp, err := server.PollerResponderNext(beginUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + c.beginUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdate) { + c.beginUpdate.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/cloudserviceoperatingsystems_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/cloudserviceoperatingsystems_server.go new file mode 100644 index 00000000000..b27b3ba9b71 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/cloudserviceoperatingsystems_server.go @@ -0,0 +1,232 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// CloudServiceOperatingSystemsServer is a fake server for instances of the armcompute.CloudServiceOperatingSystemsClient type. +type CloudServiceOperatingSystemsServer struct { + // GetOSFamily is the fake for method CloudServiceOperatingSystemsClient.GetOSFamily + // HTTP status codes to indicate success: http.StatusOK + GetOSFamily func(ctx context.Context, location string, osFamilyName string, options *armcompute.CloudServiceOperatingSystemsClientGetOSFamilyOptions) (resp azfake.Responder[armcompute.CloudServiceOperatingSystemsClientGetOSFamilyResponse], errResp azfake.ErrorResponder) + + // GetOSVersion is the fake for method CloudServiceOperatingSystemsClient.GetOSVersion + // HTTP status codes to indicate success: http.StatusOK + GetOSVersion func(ctx context.Context, location string, osVersionName string, options *armcompute.CloudServiceOperatingSystemsClientGetOSVersionOptions) (resp azfake.Responder[armcompute.CloudServiceOperatingSystemsClientGetOSVersionResponse], errResp azfake.ErrorResponder) + + // NewListOSFamiliesPager is the fake for method CloudServiceOperatingSystemsClient.NewListOSFamiliesPager + // HTTP status codes to indicate success: http.StatusOK + NewListOSFamiliesPager func(location string, options *armcompute.CloudServiceOperatingSystemsClientListOSFamiliesOptions) (resp azfake.PagerResponder[armcompute.CloudServiceOperatingSystemsClientListOSFamiliesResponse]) + + // NewListOSVersionsPager is the fake for method CloudServiceOperatingSystemsClient.NewListOSVersionsPager + // HTTP status codes to indicate success: http.StatusOK + NewListOSVersionsPager func(location string, options *armcompute.CloudServiceOperatingSystemsClientListOSVersionsOptions) (resp azfake.PagerResponder[armcompute.CloudServiceOperatingSystemsClientListOSVersionsResponse]) +} + +// NewCloudServiceOperatingSystemsServerTransport creates a new instance of CloudServiceOperatingSystemsServerTransport with the provided implementation. +// The returned CloudServiceOperatingSystemsServerTransport instance is connected to an instance of armcompute.CloudServiceOperatingSystemsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewCloudServiceOperatingSystemsServerTransport(srv *CloudServiceOperatingSystemsServer) *CloudServiceOperatingSystemsServerTransport { + return &CloudServiceOperatingSystemsServerTransport{ + srv: srv, + newListOSFamiliesPager: newTracker[azfake.PagerResponder[armcompute.CloudServiceOperatingSystemsClientListOSFamiliesResponse]](), + newListOSVersionsPager: newTracker[azfake.PagerResponder[armcompute.CloudServiceOperatingSystemsClientListOSVersionsResponse]](), + } +} + +// CloudServiceOperatingSystemsServerTransport connects instances of armcompute.CloudServiceOperatingSystemsClient to instances of CloudServiceOperatingSystemsServer. +// Don't use this type directly, use NewCloudServiceOperatingSystemsServerTransport instead. 
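// Usage sketch (illustration only, not part of the generated file): list operations are faked
// with an azfake.PagerResponder; each page queued here is returned in order, and the transport
// rewrites NextLink so the SDK pager walks the fake pages transparently. The AddPage helper and
// the OSVersionListResult field names are assumed from the vendored azcore/fake and armcompute
// versions.
//
//	srv := fake.CloudServiceOperatingSystemsServer{
//		NewListOSVersionsPager: func(location string, options *armcompute.CloudServiceOperatingSystemsClientListOSVersionsOptions) (resp azfake.PagerResponder[armcompute.CloudServiceOperatingSystemsClientListOSVersionsResponse]) {
//			// Queue a single page with one illustrative OS version entry.
//			resp.AddPage(http.StatusOK, armcompute.CloudServiceOperatingSystemsClientListOSVersionsResponse{
//				OSVersionListResult: armcompute.OSVersionListResult{
//					Value: []*armcompute.OSVersion{{Name: to.Ptr("example-os-version")}},
//				},
//			}, nil)
//			return
//		},
//	}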
+type CloudServiceOperatingSystemsServerTransport struct { + srv *CloudServiceOperatingSystemsServer + newListOSFamiliesPager *tracker[azfake.PagerResponder[armcompute.CloudServiceOperatingSystemsClientListOSFamiliesResponse]] + newListOSVersionsPager *tracker[azfake.PagerResponder[armcompute.CloudServiceOperatingSystemsClientListOSVersionsResponse]] +} + +// Do implements the policy.Transporter interface for CloudServiceOperatingSystemsServerTransport. +func (c *CloudServiceOperatingSystemsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "CloudServiceOperatingSystemsClient.GetOSFamily": + resp, err = c.dispatchGetOSFamily(req) + case "CloudServiceOperatingSystemsClient.GetOSVersion": + resp, err = c.dispatchGetOSVersion(req) + case "CloudServiceOperatingSystemsClient.NewListOSFamiliesPager": + resp, err = c.dispatchNewListOSFamiliesPager(req) + case "CloudServiceOperatingSystemsClient.NewListOSVersionsPager": + resp, err = c.dispatchNewListOSVersionsPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (c *CloudServiceOperatingSystemsServerTransport) dispatchGetOSFamily(req *http.Request) (*http.Response, error) { + if c.srv.GetOSFamily == nil { + return nil, &nonRetriableError{errors.New("fake for method GetOSFamily not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/cloudServiceOsFamilies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + osFamilyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("osFamilyName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.GetOSFamily(req.Context(), locationParam, osFamilyNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).OSFamily, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *CloudServiceOperatingSystemsServerTransport) dispatchGetOSVersion(req *http.Request) (*http.Response, error) { + if c.srv.GetOSVersion == nil { + return nil, &nonRetriableError{errors.New("fake for method GetOSVersion not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/cloudServiceOsVersions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + osVersionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("osVersionName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.GetOSVersion(req.Context(), locationParam, osVersionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).OSVersion, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *CloudServiceOperatingSystemsServerTransport) dispatchNewListOSFamiliesPager(req *http.Request) (*http.Response, error) { + if c.srv.NewListOSFamiliesPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListOSFamiliesPager not implemented")} + } + newListOSFamiliesPager := c.newListOSFamiliesPager.get(req) + if newListOSFamiliesPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/cloudServiceOsFamilies` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + resp := c.srv.NewListOSFamiliesPager(locationParam, nil) + newListOSFamiliesPager = &resp + c.newListOSFamiliesPager.add(req, newListOSFamiliesPager) + server.PagerResponderInjectNextLinks(newListOSFamiliesPager, req, func(page *armcompute.CloudServiceOperatingSystemsClientListOSFamiliesResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListOSFamiliesPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + c.newListOSFamiliesPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListOSFamiliesPager) { + c.newListOSFamiliesPager.remove(req) + } + return resp, nil +} + +func (c *CloudServiceOperatingSystemsServerTransport) dispatchNewListOSVersionsPager(req *http.Request) (*http.Response, error) { + if c.srv.NewListOSVersionsPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListOSVersionsPager not implemented")} + } + newListOSVersionsPager := c.newListOSVersionsPager.get(req) + if newListOSVersionsPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/cloudServiceOsVersions` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + resp := c.srv.NewListOSVersionsPager(locationParam, nil) + newListOSVersionsPager = &resp + c.newListOSVersionsPager.add(req, newListOSVersionsPager) + server.PagerResponderInjectNextLinks(newListOSVersionsPager, req, func(page *armcompute.CloudServiceOperatingSystemsClientListOSVersionsResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListOSVersionsPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + c.newListOSVersionsPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListOSVersionsPager) { + c.newListOSVersionsPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/cloudserviceroleinstances_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/cloudserviceroleinstances_server.go new file mode 100644 index 00000000000..7b061ec4d58 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/cloudserviceroleinstances_server.go @@ -0,0 +1,493 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// CloudServiceRoleInstancesServer is a fake server for instances of the armcompute.CloudServiceRoleInstancesClient type. 
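// Usage sketch (illustration only, not part of the generated file): errors are injected through
// the azfake.ErrorResponder returned alongside each response; server.GetError in the dispatchers
// below converts it into the error the real client surfaces (typically an *azcore.ResponseError).
// The SetResponseError helper and its argument order (status code first, then error code) are
// assumed from the vendored azcore/fake version.
//
//	srv := fake.CloudServiceRoleInstancesServer{
//		Get: func(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *armcompute.CloudServiceRoleInstancesClientGetOptions) (resp azfake.Responder[armcompute.CloudServiceRoleInstancesClientGetResponse], errResp azfake.ErrorResponder) {
//			errResp.SetResponseError(http.StatusNotFound, "ResourceNotFound") // every Get call now fails with a 404
//			return
//		},
//	}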
+type CloudServiceRoleInstancesServer struct { + // BeginDelete is the fake for method CloudServiceRoleInstancesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *armcompute.CloudServiceRoleInstancesClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.CloudServiceRoleInstancesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method CloudServiceRoleInstancesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *armcompute.CloudServiceRoleInstancesClientGetOptions) (resp azfake.Responder[armcompute.CloudServiceRoleInstancesClientGetResponse], errResp azfake.ErrorResponder) + + // GetInstanceView is the fake for method CloudServiceRoleInstancesClient.GetInstanceView + // HTTP status codes to indicate success: http.StatusOK + GetInstanceView func(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *armcompute.CloudServiceRoleInstancesClientGetInstanceViewOptions) (resp azfake.Responder[armcompute.CloudServiceRoleInstancesClientGetInstanceViewResponse], errResp azfake.ErrorResponder) + + // GetRemoteDesktopFile is the fake for method CloudServiceRoleInstancesClient.GetRemoteDesktopFile + // HTTP status codes to indicate success: http.StatusOK + GetRemoteDesktopFile func(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *armcompute.CloudServiceRoleInstancesClientGetRemoteDesktopFileOptions) (resp azfake.Responder[armcompute.CloudServiceRoleInstancesClientGetRemoteDesktopFileResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method CloudServiceRoleInstancesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, cloudServiceName string, options *armcompute.CloudServiceRoleInstancesClientListOptions) (resp azfake.PagerResponder[armcompute.CloudServiceRoleInstancesClientListResponse]) + + // BeginRebuild is the fake for method CloudServiceRoleInstancesClient.BeginRebuild + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginRebuild func(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *armcompute.CloudServiceRoleInstancesClientBeginRebuildOptions) (resp azfake.PollerResponder[armcompute.CloudServiceRoleInstancesClientRebuildResponse], errResp azfake.ErrorResponder) + + // BeginReimage is the fake for method CloudServiceRoleInstancesClient.BeginReimage + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginReimage func(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *armcompute.CloudServiceRoleInstancesClientBeginReimageOptions) (resp azfake.PollerResponder[armcompute.CloudServiceRoleInstancesClientReimageResponse], errResp azfake.ErrorResponder) + + // BeginRestart is the fake for method CloudServiceRoleInstancesClient.BeginRestart + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginRestart func(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options 
*armcompute.CloudServiceRoleInstancesClientBeginRestartOptions) (resp azfake.PollerResponder[armcompute.CloudServiceRoleInstancesClientRestartResponse], errResp azfake.ErrorResponder) +} + +// NewCloudServiceRoleInstancesServerTransport creates a new instance of CloudServiceRoleInstancesServerTransport with the provided implementation. +// The returned CloudServiceRoleInstancesServerTransport instance is connected to an instance of armcompute.CloudServiceRoleInstancesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewCloudServiceRoleInstancesServerTransport(srv *CloudServiceRoleInstancesServer) *CloudServiceRoleInstancesServerTransport { + return &CloudServiceRoleInstancesServerTransport{ + srv: srv, + beginDelete: newTracker[azfake.PollerResponder[armcompute.CloudServiceRoleInstancesClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armcompute.CloudServiceRoleInstancesClientListResponse]](), + beginRebuild: newTracker[azfake.PollerResponder[armcompute.CloudServiceRoleInstancesClientRebuildResponse]](), + beginReimage: newTracker[azfake.PollerResponder[armcompute.CloudServiceRoleInstancesClientReimageResponse]](), + beginRestart: newTracker[azfake.PollerResponder[armcompute.CloudServiceRoleInstancesClientRestartResponse]](), + } +} + +// CloudServiceRoleInstancesServerTransport connects instances of armcompute.CloudServiceRoleInstancesClient to instances of CloudServiceRoleInstancesServer. +// Don't use this type directly, use NewCloudServiceRoleInstancesServerTransport instead. +type CloudServiceRoleInstancesServerTransport struct { + srv *CloudServiceRoleInstancesServer + beginDelete *tracker[azfake.PollerResponder[armcompute.CloudServiceRoleInstancesClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armcompute.CloudServiceRoleInstancesClientListResponse]] + beginRebuild *tracker[azfake.PollerResponder[armcompute.CloudServiceRoleInstancesClientRebuildResponse]] + beginReimage *tracker[azfake.PollerResponder[armcompute.CloudServiceRoleInstancesClientReimageResponse]] + beginRestart *tracker[azfake.PollerResponder[armcompute.CloudServiceRoleInstancesClientRestartResponse]] +} + +// Do implements the policy.Transporter interface for CloudServiceRoleInstancesServerTransport. 
+func (c *CloudServiceRoleInstancesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "CloudServiceRoleInstancesClient.BeginDelete": + resp, err = c.dispatchBeginDelete(req) + case "CloudServiceRoleInstancesClient.Get": + resp, err = c.dispatchGet(req) + case "CloudServiceRoleInstancesClient.GetInstanceView": + resp, err = c.dispatchGetInstanceView(req) + case "CloudServiceRoleInstancesClient.GetRemoteDesktopFile": + resp, err = c.dispatchGetRemoteDesktopFile(req) + case "CloudServiceRoleInstancesClient.NewListPager": + resp, err = c.dispatchNewListPager(req) + case "CloudServiceRoleInstancesClient.BeginRebuild": + resp, err = c.dispatchBeginRebuild(req) + case "CloudServiceRoleInstancesClient.BeginReimage": + resp, err = c.dispatchBeginReimage(req) + case "CloudServiceRoleInstancesClient.BeginRestart": + resp, err = c.dispatchBeginRestart(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (c *CloudServiceRoleInstancesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if c.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := c.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/roleInstances/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + roleInstanceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("roleInstanceName")]) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.BeginDelete(req.Context(), roleInstanceNameParam, resourceGroupNameParam, cloudServiceNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + c.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + c.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + c.beginDelete.remove(req) + } + + return resp, nil +} + +func (c *CloudServiceRoleInstancesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if c.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/roleInstances/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + roleInstanceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("roleInstanceName")]) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(armcompute.InstanceViewTypes(expandUnescaped)) + var options *armcompute.CloudServiceRoleInstancesClientGetOptions + if expandParam != nil { + options = &armcompute.CloudServiceRoleInstancesClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := c.srv.Get(req.Context(), roleInstanceNameParam, resourceGroupNameParam, cloudServiceNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).RoleInstance, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *CloudServiceRoleInstancesServerTransport) dispatchGetInstanceView(req *http.Request) (*http.Response, error) { + if c.srv.GetInstanceView == nil { + return nil, &nonRetriableError{errors.New("fake for method GetInstanceView not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/roleInstances/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/instanceView` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + roleInstanceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("roleInstanceName")]) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.GetInstanceView(req.Context(), roleInstanceNameParam, resourceGroupNameParam, cloudServiceNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).RoleInstanceView, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *CloudServiceRoleInstancesServerTransport) dispatchGetRemoteDesktopFile(req *http.Request) (*http.Response, error) { + if c.srv.GetRemoteDesktopFile == nil { + return nil, &nonRetriableError{errors.New("fake for method GetRemoteDesktopFile not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/roleInstances/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/remoteDesktopFile` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + roleInstanceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("roleInstanceName")]) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.GetRemoteDesktopFile(req.Context(), roleInstanceNameParam, resourceGroupNameParam, cloudServiceNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.NewResponse(respContent, req, &server.ResponseOptions{ + Body: server.GetResponse(respr).Body, + ContentType: "application/octet-stream", + }) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *CloudServiceRoleInstancesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if c.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := c.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/roleInstances` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(armcompute.InstanceViewTypes(expandUnescaped)) + var options *armcompute.CloudServiceRoleInstancesClientListOptions + if expandParam != nil { + options = &armcompute.CloudServiceRoleInstancesClientListOptions{ + Expand: expandParam, + } + } + resp := c.srv.NewListPager(resourceGroupNameParam, cloudServiceNameParam, options) + newListPager = &resp + c.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armcompute.CloudServiceRoleInstancesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + c.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + c.newListPager.remove(req) + } + return resp, nil +} + +func (c *CloudServiceRoleInstancesServerTransport) dispatchBeginRebuild(req *http.Request) (*http.Response, error) { + if c.srv.BeginRebuild == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginRebuild not implemented")} + } + beginRebuild := c.beginRebuild.get(req) + if beginRebuild == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/roleInstances/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/rebuild` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + roleInstanceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("roleInstanceName")]) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.BeginRebuild(req.Context(), roleInstanceNameParam, resourceGroupNameParam, cloudServiceNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginRebuild = &respr + c.beginRebuild.add(req, beginRebuild) + } + + resp, err := server.PollerResponderNext(beginRebuild, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + c.beginRebuild.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginRebuild) { + c.beginRebuild.remove(req) + } + + return resp, nil +} + +func (c *CloudServiceRoleInstancesServerTransport) dispatchBeginReimage(req *http.Request) (*http.Response, error) { + if c.srv.BeginReimage == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginReimage not implemented")} + } + beginReimage := c.beginReimage.get(req) + if beginReimage == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/roleInstances/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/reimage` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + roleInstanceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("roleInstanceName")]) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.BeginReimage(req.Context(), roleInstanceNameParam, resourceGroupNameParam, cloudServiceNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginReimage = &respr + c.beginReimage.add(req, beginReimage) + } + + resp, err := server.PollerResponderNext(beginReimage, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + c.beginReimage.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginReimage) { + c.beginReimage.remove(req) + } + + return resp, nil +} + +func (c *CloudServiceRoleInstancesServerTransport) dispatchBeginRestart(req *http.Request) (*http.Response, error) { + if c.srv.BeginRestart == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginRestart not implemented")} + } + beginRestart := c.beginRestart.get(req) + if beginRestart == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/roleInstances/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/restart` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + roleInstanceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("roleInstanceName")]) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.BeginRestart(req.Context(), roleInstanceNameParam, resourceGroupNameParam, cloudServiceNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginRestart = &respr + c.beginRestart.add(req, beginRestart) + } + + resp, err := server.PollerResponderNext(beginRestart, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + c.beginRestart.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginRestart) { + c.beginRestart.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/cloudserviceroles_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/cloudserviceroles_server.go new file mode 100644 index 00000000000..5945a4627dc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/cloudserviceroles_server.go @@ -0,0 +1,156 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// CloudServiceRolesServer is a fake server for instances of the armcompute.CloudServiceRolesClient type. 
+type CloudServiceRolesServer struct { + // Get is the fake for method CloudServiceRolesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, roleName string, resourceGroupName string, cloudServiceName string, options *armcompute.CloudServiceRolesClientGetOptions) (resp azfake.Responder[armcompute.CloudServiceRolesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method CloudServiceRolesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, cloudServiceName string, options *armcompute.CloudServiceRolesClientListOptions) (resp azfake.PagerResponder[armcompute.CloudServiceRolesClientListResponse]) +} + +// NewCloudServiceRolesServerTransport creates a new instance of CloudServiceRolesServerTransport with the provided implementation. +// The returned CloudServiceRolesServerTransport instance is connected to an instance of armcompute.CloudServiceRolesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewCloudServiceRolesServerTransport(srv *CloudServiceRolesServer) *CloudServiceRolesServerTransport { + return &CloudServiceRolesServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armcompute.CloudServiceRolesClientListResponse]](), + } +} + +// CloudServiceRolesServerTransport connects instances of armcompute.CloudServiceRolesClient to instances of CloudServiceRolesServer. +// Don't use this type directly, use NewCloudServiceRolesServerTransport instead. +type CloudServiceRolesServerTransport struct { + srv *CloudServiceRolesServer + newListPager *tracker[azfake.PagerResponder[armcompute.CloudServiceRolesClientListResponse]] +} + +// Do implements the policy.Transporter interface for CloudServiceRolesServerTransport. 
+func (c *CloudServiceRolesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "CloudServiceRolesClient.Get": + resp, err = c.dispatchGet(req) + case "CloudServiceRolesClient.NewListPager": + resp, err = c.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (c *CloudServiceRolesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if c.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/roles/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + roleNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("roleName")]) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.Get(req.Context(), roleNameParam, resourceGroupNameParam, cloudServiceNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).CloudServiceRole, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *CloudServiceRolesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if c.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := c.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/roles` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + resp := c.srv.NewListPager(resourceGroupNameParam, cloudServiceNameParam, nil) + newListPager = &resp + c.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armcompute.CloudServiceRolesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + c.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + c.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/cloudservices_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/cloudservices_server.go new file mode 100644 index 00000000000..749eb413084 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/cloudservices_server.go @@ -0,0 +1,745 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "reflect" + "regexp" +) + +// CloudServicesServer is a fake server for instances of the armcompute.CloudServicesClient type. 
+type CloudServicesServer struct { + // BeginCreateOrUpdate is the fake for method CloudServicesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters armcompute.CloudService, options *armcompute.CloudServicesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armcompute.CloudServicesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method CloudServicesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, cloudServiceName string, options *armcompute.CloudServicesClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.CloudServicesClientDeleteResponse], errResp azfake.ErrorResponder) + + // BeginDeleteInstances is the fake for method CloudServicesClient.BeginDeleteInstances + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginDeleteInstances func(ctx context.Context, resourceGroupName string, cloudServiceName string, options *armcompute.CloudServicesClientBeginDeleteInstancesOptions) (resp azfake.PollerResponder[armcompute.CloudServicesClientDeleteInstancesResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method CloudServicesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, cloudServiceName string, options *armcompute.CloudServicesClientGetOptions) (resp azfake.Responder[armcompute.CloudServicesClientGetResponse], errResp azfake.ErrorResponder) + + // GetInstanceView is the fake for method CloudServicesClient.GetInstanceView + // HTTP status codes to indicate success: http.StatusOK + GetInstanceView func(ctx context.Context, resourceGroupName string, cloudServiceName string, options *armcompute.CloudServicesClientGetInstanceViewOptions) (resp azfake.Responder[armcompute.CloudServicesClientGetInstanceViewResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method CloudServicesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armcompute.CloudServicesClientListOptions) (resp azfake.PagerResponder[armcompute.CloudServicesClientListResponse]) + + // NewListAllPager is the fake for method CloudServicesClient.NewListAllPager + // HTTP status codes to indicate success: http.StatusOK + NewListAllPager func(options *armcompute.CloudServicesClientListAllOptions) (resp azfake.PagerResponder[armcompute.CloudServicesClientListAllResponse]) + + // BeginPowerOff is the fake for method CloudServicesClient.BeginPowerOff + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginPowerOff func(ctx context.Context, resourceGroupName string, cloudServiceName string, options *armcompute.CloudServicesClientBeginPowerOffOptions) (resp azfake.PollerResponder[armcompute.CloudServicesClientPowerOffResponse], errResp azfake.ErrorResponder) + + // BeginRebuild is the fake for method CloudServicesClient.BeginRebuild + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginRebuild func(ctx context.Context, resourceGroupName string, cloudServiceName string, options *armcompute.CloudServicesClientBeginRebuildOptions) (resp 
azfake.PollerResponder[armcompute.CloudServicesClientRebuildResponse], errResp azfake.ErrorResponder) + + // BeginReimage is the fake for method CloudServicesClient.BeginReimage + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginReimage func(ctx context.Context, resourceGroupName string, cloudServiceName string, options *armcompute.CloudServicesClientBeginReimageOptions) (resp azfake.PollerResponder[armcompute.CloudServicesClientReimageResponse], errResp azfake.ErrorResponder) + + // BeginRestart is the fake for method CloudServicesClient.BeginRestart + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginRestart func(ctx context.Context, resourceGroupName string, cloudServiceName string, options *armcompute.CloudServicesClientBeginRestartOptions) (resp azfake.PollerResponder[armcompute.CloudServicesClientRestartResponse], errResp azfake.ErrorResponder) + + // BeginStart is the fake for method CloudServicesClient.BeginStart + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginStart func(ctx context.Context, resourceGroupName string, cloudServiceName string, options *armcompute.CloudServicesClientBeginStartOptions) (resp azfake.PollerResponder[armcompute.CloudServicesClientStartResponse], errResp azfake.ErrorResponder) + + // BeginUpdate is the fake for method CloudServicesClient.BeginUpdate + // HTTP status codes to indicate success: http.StatusOK + BeginUpdate func(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters armcompute.CloudServiceUpdate, options *armcompute.CloudServicesClientBeginUpdateOptions) (resp azfake.PollerResponder[armcompute.CloudServicesClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewCloudServicesServerTransport creates a new instance of CloudServicesServerTransport with the provided implementation. +// The returned CloudServicesServerTransport instance is connected to an instance of armcompute.CloudServicesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewCloudServicesServerTransport(srv *CloudServicesServer) *CloudServicesServerTransport { + return &CloudServicesServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armcompute.CloudServicesClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armcompute.CloudServicesClientDeleteResponse]](), + beginDeleteInstances: newTracker[azfake.PollerResponder[armcompute.CloudServicesClientDeleteInstancesResponse]](), + newListPager: newTracker[azfake.PagerResponder[armcompute.CloudServicesClientListResponse]](), + newListAllPager: newTracker[azfake.PagerResponder[armcompute.CloudServicesClientListAllResponse]](), + beginPowerOff: newTracker[azfake.PollerResponder[armcompute.CloudServicesClientPowerOffResponse]](), + beginRebuild: newTracker[azfake.PollerResponder[armcompute.CloudServicesClientRebuildResponse]](), + beginReimage: newTracker[azfake.PollerResponder[armcompute.CloudServicesClientReimageResponse]](), + beginRestart: newTracker[azfake.PollerResponder[armcompute.CloudServicesClientRestartResponse]](), + beginStart: newTracker[azfake.PollerResponder[armcompute.CloudServicesClientStartResponse]](), + beginUpdate: newTracker[azfake.PollerResponder[armcompute.CloudServicesClientUpdateResponse]](), + } +} + +// CloudServicesServerTransport connects instances of armcompute.CloudServicesClient to instances of CloudServicesServer. 
+// Don't use this type directly, use NewCloudServicesServerTransport instead. +type CloudServicesServerTransport struct { + srv *CloudServicesServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armcompute.CloudServicesClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armcompute.CloudServicesClientDeleteResponse]] + beginDeleteInstances *tracker[azfake.PollerResponder[armcompute.CloudServicesClientDeleteInstancesResponse]] + newListPager *tracker[azfake.PagerResponder[armcompute.CloudServicesClientListResponse]] + newListAllPager *tracker[azfake.PagerResponder[armcompute.CloudServicesClientListAllResponse]] + beginPowerOff *tracker[azfake.PollerResponder[armcompute.CloudServicesClientPowerOffResponse]] + beginRebuild *tracker[azfake.PollerResponder[armcompute.CloudServicesClientRebuildResponse]] + beginReimage *tracker[azfake.PollerResponder[armcompute.CloudServicesClientReimageResponse]] + beginRestart *tracker[azfake.PollerResponder[armcompute.CloudServicesClientRestartResponse]] + beginStart *tracker[azfake.PollerResponder[armcompute.CloudServicesClientStartResponse]] + beginUpdate *tracker[azfake.PollerResponder[armcompute.CloudServicesClientUpdateResponse]] +} + +// Do implements the policy.Transporter interface for CloudServicesServerTransport. +func (c *CloudServicesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "CloudServicesClient.BeginCreateOrUpdate": + resp, err = c.dispatchBeginCreateOrUpdate(req) + case "CloudServicesClient.BeginDelete": + resp, err = c.dispatchBeginDelete(req) + case "CloudServicesClient.BeginDeleteInstances": + resp, err = c.dispatchBeginDeleteInstances(req) + case "CloudServicesClient.Get": + resp, err = c.dispatchGet(req) + case "CloudServicesClient.GetInstanceView": + resp, err = c.dispatchGetInstanceView(req) + case "CloudServicesClient.NewListPager": + resp, err = c.dispatchNewListPager(req) + case "CloudServicesClient.NewListAllPager": + resp, err = c.dispatchNewListAllPager(req) + case "CloudServicesClient.BeginPowerOff": + resp, err = c.dispatchBeginPowerOff(req) + case "CloudServicesClient.BeginRebuild": + resp, err = c.dispatchBeginRebuild(req) + case "CloudServicesClient.BeginReimage": + resp, err = c.dispatchBeginReimage(req) + case "CloudServicesClient.BeginRestart": + resp, err = c.dispatchBeginRestart(req) + case "CloudServicesClient.BeginStart": + resp, err = c.dispatchBeginStart(req) + case "CloudServicesClient.BeginUpdate": + resp, err = c.dispatchBeginUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (c *CloudServicesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if c.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := c.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := 
regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.CloudService](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, cloudServiceNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + c.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + c.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + c.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (c *CloudServicesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if c.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := c.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.BeginDelete(req.Context(), resourceGroupNameParam, cloudServiceNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + c.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + c.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + c.beginDelete.remove(req) + } + + return resp, nil +} + +func (c *CloudServicesServerTransport) dispatchBeginDeleteInstances(req *http.Request) (*http.Response, error) { + if c.srv.BeginDeleteInstances == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDeleteInstances not implemented")} + } + beginDeleteInstances := c.beginDeleteInstances.get(req) + if beginDeleteInstances == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/delete` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.RoleInstances](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + var options *armcompute.CloudServicesClientBeginDeleteInstancesOptions + if !reflect.ValueOf(body).IsZero() { + options = &armcompute.CloudServicesClientBeginDeleteInstancesOptions{ + Parameters: &body, + } + } + respr, errRespr := c.srv.BeginDeleteInstances(req.Context(), resourceGroupNameParam, cloudServiceNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDeleteInstances = &respr + c.beginDeleteInstances.add(req, beginDeleteInstances) + } + + resp, err := server.PollerResponderNext(beginDeleteInstances, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + c.beginDeleteInstances.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDeleteInstances) { + c.beginDeleteInstances.remove(req) + } + + return resp, nil +} + +func (c *CloudServicesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if c.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.Get(req.Context(), resourceGroupNameParam, cloudServiceNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).CloudService, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *CloudServicesServerTransport) dispatchGetInstanceView(req *http.Request) (*http.Response, error) { + if c.srv.GetInstanceView == nil { + return nil, &nonRetriableError{errors.New("fake for method GetInstanceView not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/instanceView` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.GetInstanceView(req.Context(), resourceGroupNameParam, cloudServiceNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).CloudServiceInstanceView, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *CloudServicesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if c.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := c.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := c.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + c.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armcompute.CloudServicesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + c.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + c.newListPager.remove(req) + } + return resp, nil +} + +func (c *CloudServicesServerTransport) dispatchNewListAllPager(req *http.Request) (*http.Response, error) { + if c.srv.NewListAllPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAllPager not implemented")} + } + newListAllPager := c.newListAllPager.get(req) + if newListAllPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := c.srv.NewListAllPager(nil) + newListAllPager = &resp + c.newListAllPager.add(req, newListAllPager) + server.PagerResponderInjectNextLinks(newListAllPager, req, func(page *armcompute.CloudServicesClientListAllResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAllPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + c.newListAllPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAllPager) { + c.newListAllPager.remove(req) + } + return resp, nil +} + +func (c *CloudServicesServerTransport) dispatchBeginPowerOff(req *http.Request) (*http.Response, error) { + if c.srv.BeginPowerOff == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginPowerOff not implemented")} + } + beginPowerOff := c.beginPowerOff.get(req) + if beginPowerOff == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/poweroff` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.BeginPowerOff(req.Context(), resourceGroupNameParam, cloudServiceNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginPowerOff = &respr + c.beginPowerOff.add(req, beginPowerOff) + } + + resp, err := server.PollerResponderNext(beginPowerOff, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + c.beginPowerOff.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginPowerOff) { + c.beginPowerOff.remove(req) + } + + return resp, nil +} + +func (c *CloudServicesServerTransport) dispatchBeginRebuild(req *http.Request) (*http.Response, error) { + if c.srv.BeginRebuild == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginRebuild not implemented")} + } + beginRebuild := c.beginRebuild.get(req) + if beginRebuild == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/rebuild` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.RoleInstances](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + var options *armcompute.CloudServicesClientBeginRebuildOptions + if !reflect.ValueOf(body).IsZero() { + options = &armcompute.CloudServicesClientBeginRebuildOptions{ + Parameters: &body, + } + } + respr, errRespr := c.srv.BeginRebuild(req.Context(), resourceGroupNameParam, cloudServiceNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginRebuild = &respr + c.beginRebuild.add(req, beginRebuild) + } + + resp, err := 
server.PollerResponderNext(beginRebuild, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + c.beginRebuild.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginRebuild) { + c.beginRebuild.remove(req) + } + + return resp, nil +} + +func (c *CloudServicesServerTransport) dispatchBeginReimage(req *http.Request) (*http.Response, error) { + if c.srv.BeginReimage == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginReimage not implemented")} + } + beginReimage := c.beginReimage.get(req) + if beginReimage == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/reimage` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.RoleInstances](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + var options *armcompute.CloudServicesClientBeginReimageOptions + if !reflect.ValueOf(body).IsZero() { + options = &armcompute.CloudServicesClientBeginReimageOptions{ + Parameters: &body, + } + } + respr, errRespr := c.srv.BeginReimage(req.Context(), resourceGroupNameParam, cloudServiceNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginReimage = &respr + c.beginReimage.add(req, beginReimage) + } + + resp, err := server.PollerResponderNext(beginReimage, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + c.beginReimage.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginReimage) { + c.beginReimage.remove(req) + } + + return resp, nil +} + +func (c *CloudServicesServerTransport) dispatchBeginRestart(req *http.Request) (*http.Response, error) { + if c.srv.BeginRestart == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginRestart not implemented")} + } + beginRestart := c.beginRestart.get(req) + if beginRestart == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/restart` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.RoleInstances](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + var options *armcompute.CloudServicesClientBeginRestartOptions + if !reflect.ValueOf(body).IsZero() { + options = &armcompute.CloudServicesClientBeginRestartOptions{ + Parameters: &body, + } + } + respr, errRespr := c.srv.BeginRestart(req.Context(), resourceGroupNameParam, cloudServiceNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginRestart = &respr + c.beginRestart.add(req, beginRestart) + } + + resp, err := server.PollerResponderNext(beginRestart, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + c.beginRestart.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginRestart) { + c.beginRestart.remove(req) + } + + return resp, nil +} + +func (c *CloudServicesServerTransport) dispatchBeginStart(req *http.Request) (*http.Response, error) { + if c.srv.BeginStart == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginStart not implemented")} + } + beginStart := c.beginStart.get(req) + if beginStart == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/start` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.BeginStart(req.Context(), resourceGroupNameParam, cloudServiceNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginStart = &respr + c.beginStart.add(req, beginStart) + } + + resp, err := server.PollerResponderNext(beginStart, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + c.beginStart.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginStart) { + c.beginStart.remove(req) + } + + return resp, nil +} + +func (c *CloudServicesServerTransport) dispatchBeginUpdate(req *http.Request) (*http.Response, error) { + if c.srv.BeginUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdate not implemented")} + } + beginUpdate := c.beginUpdate.get(req) + if beginUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.CloudServiceUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.BeginUpdate(req.Context(), resourceGroupNameParam, cloudServiceNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdate = &respr + c.beginUpdate.add(req, beginUpdate) + } + + resp, err := server.PollerResponderNext(beginUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK}, resp.StatusCode) { + c.beginUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdate) { + c.beginUpdate.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/cloudservicesupdatedomain_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/cloudservicesupdatedomain_server.go new file mode 100644 index 00000000000..f15e7f3a29a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/cloudservicesupdatedomain_server.go @@ -0,0 +1,237 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" + "strconv" +) + +// CloudServicesUpdateDomainServer is a fake server for instances of the armcompute.CloudServicesUpdateDomainClient type. +type CloudServicesUpdateDomainServer struct { + // GetUpdateDomain is the fake for method CloudServicesUpdateDomainClient.GetUpdateDomain + // HTTP status codes to indicate success: http.StatusOK + GetUpdateDomain func(ctx context.Context, resourceGroupName string, cloudServiceName string, updateDomain int32, options *armcompute.CloudServicesUpdateDomainClientGetUpdateDomainOptions) (resp azfake.Responder[armcompute.CloudServicesUpdateDomainClientGetUpdateDomainResponse], errResp azfake.ErrorResponder) + + // NewListUpdateDomainsPager is the fake for method CloudServicesUpdateDomainClient.NewListUpdateDomainsPager + // HTTP status codes to indicate success: http.StatusOK + NewListUpdateDomainsPager func(resourceGroupName string, cloudServiceName string, options *armcompute.CloudServicesUpdateDomainClientListUpdateDomainsOptions) (resp azfake.PagerResponder[armcompute.CloudServicesUpdateDomainClientListUpdateDomainsResponse]) + + // BeginWalkUpdateDomain is the fake for method CloudServicesUpdateDomainClient.BeginWalkUpdateDomain + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginWalkUpdateDomain func(ctx context.Context, resourceGroupName string, cloudServiceName string, updateDomain int32, parameters armcompute.UpdateDomain, options *armcompute.CloudServicesUpdateDomainClientBeginWalkUpdateDomainOptions) (resp azfake.PollerResponder[armcompute.CloudServicesUpdateDomainClientWalkUpdateDomainResponse], errResp azfake.ErrorResponder) +} + +// NewCloudServicesUpdateDomainServerTransport creates a new instance of CloudServicesUpdateDomainServerTransport with the provided implementation. +// The returned CloudServicesUpdateDomainServerTransport instance is connected to an instance of armcompute.CloudServicesUpdateDomainClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
+func NewCloudServicesUpdateDomainServerTransport(srv *CloudServicesUpdateDomainServer) *CloudServicesUpdateDomainServerTransport { + return &CloudServicesUpdateDomainServerTransport{ + srv: srv, + newListUpdateDomainsPager: newTracker[azfake.PagerResponder[armcompute.CloudServicesUpdateDomainClientListUpdateDomainsResponse]](), + beginWalkUpdateDomain: newTracker[azfake.PollerResponder[armcompute.CloudServicesUpdateDomainClientWalkUpdateDomainResponse]](), + } +} + +// CloudServicesUpdateDomainServerTransport connects instances of armcompute.CloudServicesUpdateDomainClient to instances of CloudServicesUpdateDomainServer. +// Don't use this type directly, use NewCloudServicesUpdateDomainServerTransport instead. +type CloudServicesUpdateDomainServerTransport struct { + srv *CloudServicesUpdateDomainServer + newListUpdateDomainsPager *tracker[azfake.PagerResponder[armcompute.CloudServicesUpdateDomainClientListUpdateDomainsResponse]] + beginWalkUpdateDomain *tracker[azfake.PollerResponder[armcompute.CloudServicesUpdateDomainClientWalkUpdateDomainResponse]] +} + +// Do implements the policy.Transporter interface for CloudServicesUpdateDomainServerTransport. +func (c *CloudServicesUpdateDomainServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "CloudServicesUpdateDomainClient.GetUpdateDomain": + resp, err = c.dispatchGetUpdateDomain(req) + case "CloudServicesUpdateDomainClient.NewListUpdateDomainsPager": + resp, err = c.dispatchNewListUpdateDomainsPager(req) + case "CloudServicesUpdateDomainClient.BeginWalkUpdateDomain": + resp, err = c.dispatchBeginWalkUpdateDomain(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (c *CloudServicesUpdateDomainServerTransport) dispatchGetUpdateDomain(req *http.Request) (*http.Response, error) { + if c.srv.GetUpdateDomain == nil { + return nil, &nonRetriableError{errors.New("fake for method GetUpdateDomain not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/updateDomains/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + updateDomainUnescaped, err := url.PathUnescape(matches[regex.SubexpIndex("updateDomain")]) + if err != nil { + return nil, err + } + updateDomainParam, err := parseWithCast(updateDomainUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.GetUpdateDomain(req.Context(), resourceGroupNameParam, cloudServiceNameParam, updateDomainParam, nil) + if respErr := 
server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).UpdateDomain, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *CloudServicesUpdateDomainServerTransport) dispatchNewListUpdateDomainsPager(req *http.Request) (*http.Response, error) { + if c.srv.NewListUpdateDomainsPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListUpdateDomainsPager not implemented")} + } + newListUpdateDomainsPager := c.newListUpdateDomainsPager.get(req) + if newListUpdateDomainsPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/updateDomains` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + resp := c.srv.NewListUpdateDomainsPager(resourceGroupNameParam, cloudServiceNameParam, nil) + newListUpdateDomainsPager = &resp + c.newListUpdateDomainsPager.add(req, newListUpdateDomainsPager) + server.PagerResponderInjectNextLinks(newListUpdateDomainsPager, req, func(page *armcompute.CloudServicesUpdateDomainClientListUpdateDomainsResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListUpdateDomainsPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + c.newListUpdateDomainsPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListUpdateDomainsPager) { + c.newListUpdateDomainsPager.remove(req) + } + return resp, nil +} + +func (c *CloudServicesUpdateDomainServerTransport) dispatchBeginWalkUpdateDomain(req *http.Request) (*http.Response, error) { + if c.srv.BeginWalkUpdateDomain == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginWalkUpdateDomain not implemented")} + } + beginWalkUpdateDomain := c.beginWalkUpdateDomain.get(req) + if beginWalkUpdateDomain == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/updateDomains/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.UpdateDomain](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + updateDomainUnescaped, err := url.PathUnescape(matches[regex.SubexpIndex("updateDomain")]) + if err != nil { + return nil, err + } + updateDomainParam, err := parseWithCast(updateDomainUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.BeginWalkUpdateDomain(req.Context(), resourceGroupNameParam, cloudServiceNameParam, updateDomainParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginWalkUpdateDomain = &respr + c.beginWalkUpdateDomain.add(req, beginWalkUpdateDomain) + } + + resp, err := server.PollerResponderNext(beginWalkUpdateDomain, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + c.beginWalkUpdateDomain.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginWalkUpdateDomain) { + c.beginWalkUpdateDomain.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/communitygalleries_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/communitygalleries_server.go new file mode 100644 index 00000000000..ec6d3dcc8f3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/communitygalleries_server.go @@ -0,0 +1,100 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+
+package fake
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+	"net/http"
+	"net/url"
+	"regexp"
+)
+
+// CommunityGalleriesServer is a fake server for instances of the armcompute.CommunityGalleriesClient type.
+type CommunityGalleriesServer struct {
+	// Get is the fake for method CommunityGalleriesClient.Get
+	// HTTP status codes to indicate success: http.StatusOK
+	Get func(ctx context.Context, location string, publicGalleryName string, options *armcompute.CommunityGalleriesClientGetOptions) (resp azfake.Responder[armcompute.CommunityGalleriesClientGetResponse], errResp azfake.ErrorResponder)
+}
+
+// NewCommunityGalleriesServerTransport creates a new instance of CommunityGalleriesServerTransport with the provided implementation.
+// The returned CommunityGalleriesServerTransport instance is connected to an instance of armcompute.CommunityGalleriesClient via the
+// azcore.ClientOptions.Transporter field in the client's constructor parameters.
+func NewCommunityGalleriesServerTransport(srv *CommunityGalleriesServer) *CommunityGalleriesServerTransport {
+	return &CommunityGalleriesServerTransport{srv: srv}
+}
+
+// CommunityGalleriesServerTransport connects instances of armcompute.CommunityGalleriesClient to instances of CommunityGalleriesServer.
+// Don't use this type directly, use NewCommunityGalleriesServerTransport instead.
+type CommunityGalleriesServerTransport struct {
+	srv *CommunityGalleriesServer
+}
+
+// Do implements the policy.Transporter interface for CommunityGalleriesServerTransport.
+func (c *CommunityGalleriesServerTransport) Do(req *http.Request) (*http.Response, error) {
+	rawMethod := req.Context().Value(runtime.CtxAPINameKey{})
+	method, ok := rawMethod.(string)
+	if !ok {
+		return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")}
+	}
+
+	var resp *http.Response
+	var err error
+
+	switch method {
+	case "CommunityGalleriesClient.Get":
+		resp, err = c.dispatchGet(req)
+	default:
+		err = fmt.Errorf("unhandled API %s", method)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return resp, nil
+}
+
+func (c *CommunityGalleriesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) {
+	if c.srv.Get == nil {
+		return nil, &nonRetriableError{errors.New("fake for method Get not implemented")}
+	}
+	const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P<location>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/communityGalleries/(?P<publicGalleryName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)`
+	regex := regexp.MustCompile(regexStr)
+	matches := regex.FindStringSubmatch(req.URL.EscapedPath())
+	if matches == nil || len(matches) < 3 {
+		return nil, fmt.Errorf("failed to parse path %s", req.URL.Path)
+	}
+	locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")])
+	if err != nil {
+		return nil, err
+	}
+	publicGalleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publicGalleryName")])
+	if err != nil {
+		return nil, err
+	}
+	respr, errRespr := c.srv.Get(req.Context(), locationParam, publicGalleryNameParam, nil)
+	if respErr := server.GetError(errRespr, req); respErr != nil {
+		return nil, respErr
+	}
+	respContent := server.GetResponseContent(respr)
+	if !contains([]int{http.StatusOK}, respContent.HTTPStatus) {
+		return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)}
+	}
+	resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).CommunityGallery, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/communitygalleryimages_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/communitygalleryimages_server.go
new file mode 100644
index 00000000000..9c776344c8c
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/communitygalleryimages_server.go
@@ -0,0 +1,156 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+package fake
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+	"net/http"
+	"net/url"
+	"regexp"
+)
+
+// CommunityGalleryImagesServer is a fake server for instances of the armcompute.CommunityGalleryImagesClient type.
+type CommunityGalleryImagesServer struct {
+	// Get is the fake for method CommunityGalleryImagesClient.Get
+	// HTTP status codes to indicate success: http.StatusOK
+	Get func(ctx context.Context, location string, publicGalleryName string, galleryImageName string, options *armcompute.CommunityGalleryImagesClientGetOptions) (resp azfake.Responder[armcompute.CommunityGalleryImagesClientGetResponse], errResp azfake.ErrorResponder)
+
+	// NewListPager is the fake for method CommunityGalleryImagesClient.NewListPager
+	// HTTP status codes to indicate success: http.StatusOK
+	NewListPager func(location string, publicGalleryName string, options *armcompute.CommunityGalleryImagesClientListOptions) (resp azfake.PagerResponder[armcompute.CommunityGalleryImagesClientListResponse])
+}
+
+// NewCommunityGalleryImagesServerTransport creates a new instance of CommunityGalleryImagesServerTransport with the provided implementation.
+// The returned CommunityGalleryImagesServerTransport instance is connected to an instance of armcompute.CommunityGalleryImagesClient via the
+// azcore.ClientOptions.Transporter field in the client's constructor parameters.
+func NewCommunityGalleryImagesServerTransport(srv *CommunityGalleryImagesServer) *CommunityGalleryImagesServerTransport {
+	return &CommunityGalleryImagesServerTransport{
+		srv:          srv,
+		newListPager: newTracker[azfake.PagerResponder[armcompute.CommunityGalleryImagesClientListResponse]](),
+	}
+}
+
+// CommunityGalleryImagesServerTransport connects instances of armcompute.CommunityGalleryImagesClient to instances of CommunityGalleryImagesServer.
+// Don't use this type directly, use NewCommunityGalleryImagesServerTransport instead.
+type CommunityGalleryImagesServerTransport struct {
+	srv          *CommunityGalleryImagesServer
+	newListPager *tracker[azfake.PagerResponder[armcompute.CommunityGalleryImagesClientListResponse]]
+}
+
+// Do implements the policy.Transporter interface for CommunityGalleryImagesServerTransport.
+func (c *CommunityGalleryImagesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "CommunityGalleryImagesClient.Get": + resp, err = c.dispatchGet(req) + case "CommunityGalleryImagesClient.NewListPager": + resp, err = c.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (c *CommunityGalleryImagesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if c.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/communityGalleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/images/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + publicGalleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publicGalleryName")]) + if err != nil { + return nil, err + } + galleryImageNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryImageName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.Get(req.Context(), locationParam, publicGalleryNameParam, galleryImageNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).CommunityGalleryImage, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *CommunityGalleryImagesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if c.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := c.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/communityGalleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/images` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + publicGalleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publicGalleryName")]) + if err != nil { + return nil, err + } + resp := c.srv.NewListPager(locationParam, publicGalleryNameParam, nil) + newListPager = &resp + c.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armcompute.CommunityGalleryImagesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + c.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + c.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/communitygalleryimageversions_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/communitygalleryimageversions_server.go new file mode 100644 index 00000000000..171c06effb5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/communitygalleryimageversions_server.go @@ -0,0 +1,164 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// CommunityGalleryImageVersionsServer is a fake server for instances of the armcompute.CommunityGalleryImageVersionsClient type. 
+type CommunityGalleryImageVersionsServer struct { + // Get is the fake for method CommunityGalleryImageVersionsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, location string, publicGalleryName string, galleryImageName string, galleryImageVersionName string, options *armcompute.CommunityGalleryImageVersionsClientGetOptions) (resp azfake.Responder[armcompute.CommunityGalleryImageVersionsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method CommunityGalleryImageVersionsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(location string, publicGalleryName string, galleryImageName string, options *armcompute.CommunityGalleryImageVersionsClientListOptions) (resp azfake.PagerResponder[armcompute.CommunityGalleryImageVersionsClientListResponse]) +} + +// NewCommunityGalleryImageVersionsServerTransport creates a new instance of CommunityGalleryImageVersionsServerTransport with the provided implementation. +// The returned CommunityGalleryImageVersionsServerTransport instance is connected to an instance of armcompute.CommunityGalleryImageVersionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewCommunityGalleryImageVersionsServerTransport(srv *CommunityGalleryImageVersionsServer) *CommunityGalleryImageVersionsServerTransport { + return &CommunityGalleryImageVersionsServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armcompute.CommunityGalleryImageVersionsClientListResponse]](), + } +} + +// CommunityGalleryImageVersionsServerTransport connects instances of armcompute.CommunityGalleryImageVersionsClient to instances of CommunityGalleryImageVersionsServer. +// Don't use this type directly, use NewCommunityGalleryImageVersionsServerTransport instead. +type CommunityGalleryImageVersionsServerTransport struct { + srv *CommunityGalleryImageVersionsServer + newListPager *tracker[azfake.PagerResponder[armcompute.CommunityGalleryImageVersionsClientListResponse]] +} + +// Do implements the policy.Transporter interface for CommunityGalleryImageVersionsServerTransport. 
+func (c *CommunityGalleryImageVersionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "CommunityGalleryImageVersionsClient.Get": + resp, err = c.dispatchGet(req) + case "CommunityGalleryImageVersionsClient.NewListPager": + resp, err = c.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (c *CommunityGalleryImageVersionsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if c.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/communityGalleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/images/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/versions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + publicGalleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publicGalleryName")]) + if err != nil { + return nil, err + } + galleryImageNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryImageName")]) + if err != nil { + return nil, err + } + galleryImageVersionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryImageVersionName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.Get(req.Context(), locationParam, publicGalleryNameParam, galleryImageNameParam, galleryImageVersionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).CommunityGalleryImageVersion, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *CommunityGalleryImageVersionsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if c.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := c.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/communityGalleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/images/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/versions` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + publicGalleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publicGalleryName")]) + if err != nil { + return nil, err + } + galleryImageNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryImageName")]) + if err != nil { + return nil, err + } + resp := c.srv.NewListPager(locationParam, publicGalleryNameParam, galleryImageNameParam, nil) + newListPager = &resp + c.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armcompute.CommunityGalleryImageVersionsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + c.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + c.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/dedicatedhostgroups_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/dedicatedhostgroups_server.go new file mode 100644 index 00000000000..f1ef126130d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/dedicatedhostgroups_server.go @@ -0,0 +1,326 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// DedicatedHostGroupsServer is a fake server for instances of the armcompute.DedicatedHostGroupsClient type. 
+type DedicatedHostGroupsServer struct { + // CreateOrUpdate is the fake for method DedicatedHostGroupsClient.CreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + CreateOrUpdate func(ctx context.Context, resourceGroupName string, hostGroupName string, parameters armcompute.DedicatedHostGroup, options *armcompute.DedicatedHostGroupsClientCreateOrUpdateOptions) (resp azfake.Responder[armcompute.DedicatedHostGroupsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // Delete is the fake for method DedicatedHostGroupsClient.Delete + // HTTP status codes to indicate success: http.StatusOK, http.StatusNoContent + Delete func(ctx context.Context, resourceGroupName string, hostGroupName string, options *armcompute.DedicatedHostGroupsClientDeleteOptions) (resp azfake.Responder[armcompute.DedicatedHostGroupsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method DedicatedHostGroupsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, hostGroupName string, options *armcompute.DedicatedHostGroupsClientGetOptions) (resp azfake.Responder[armcompute.DedicatedHostGroupsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListByResourceGroupPager is the fake for method DedicatedHostGroupsClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armcompute.DedicatedHostGroupsClientListByResourceGroupOptions) (resp azfake.PagerResponder[armcompute.DedicatedHostGroupsClientListByResourceGroupResponse]) + + // NewListBySubscriptionPager is the fake for method DedicatedHostGroupsClient.NewListBySubscriptionPager + // HTTP status codes to indicate success: http.StatusOK + NewListBySubscriptionPager func(options *armcompute.DedicatedHostGroupsClientListBySubscriptionOptions) (resp azfake.PagerResponder[armcompute.DedicatedHostGroupsClientListBySubscriptionResponse]) + + // Update is the fake for method DedicatedHostGroupsClient.Update + // HTTP status codes to indicate success: http.StatusOK + Update func(ctx context.Context, resourceGroupName string, hostGroupName string, parameters armcompute.DedicatedHostGroupUpdate, options *armcompute.DedicatedHostGroupsClientUpdateOptions) (resp azfake.Responder[armcompute.DedicatedHostGroupsClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewDedicatedHostGroupsServerTransport creates a new instance of DedicatedHostGroupsServerTransport with the provided implementation. +// The returned DedicatedHostGroupsServerTransport instance is connected to an instance of armcompute.DedicatedHostGroupsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewDedicatedHostGroupsServerTransport(srv *DedicatedHostGroupsServer) *DedicatedHostGroupsServerTransport { + return &DedicatedHostGroupsServerTransport{ + srv: srv, + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armcompute.DedicatedHostGroupsClientListByResourceGroupResponse]](), + newListBySubscriptionPager: newTracker[azfake.PagerResponder[armcompute.DedicatedHostGroupsClientListBySubscriptionResponse]](), + } +} + +// DedicatedHostGroupsServerTransport connects instances of armcompute.DedicatedHostGroupsClient to instances of DedicatedHostGroupsServer. +// Don't use this type directly, use NewDedicatedHostGroupsServerTransport instead. 
+type DedicatedHostGroupsServerTransport struct { + srv *DedicatedHostGroupsServer + newListByResourceGroupPager *tracker[azfake.PagerResponder[armcompute.DedicatedHostGroupsClientListByResourceGroupResponse]] + newListBySubscriptionPager *tracker[azfake.PagerResponder[armcompute.DedicatedHostGroupsClientListBySubscriptionResponse]] +} + +// Do implements the policy.Transporter interface for DedicatedHostGroupsServerTransport. +func (d *DedicatedHostGroupsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "DedicatedHostGroupsClient.CreateOrUpdate": + resp, err = d.dispatchCreateOrUpdate(req) + case "DedicatedHostGroupsClient.Delete": + resp, err = d.dispatchDelete(req) + case "DedicatedHostGroupsClient.Get": + resp, err = d.dispatchGet(req) + case "DedicatedHostGroupsClient.NewListByResourceGroupPager": + resp, err = d.dispatchNewListByResourceGroupPager(req) + case "DedicatedHostGroupsClient.NewListBySubscriptionPager": + resp, err = d.dispatchNewListBySubscriptionPager(req) + case "DedicatedHostGroupsClient.Update": + resp, err = d.dispatchUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (d *DedicatedHostGroupsServerTransport) dispatchCreateOrUpdate(req *http.Request) (*http.Response, error) { + if d.srv.CreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method CreateOrUpdate not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/hostGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.DedicatedHostGroup](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + hostGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("hostGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.CreateOrUpdate(req.Context(), resourceGroupNameParam, hostGroupNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusCreated}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).DedicatedHostGroup, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (d *DedicatedHostGroupsServerTransport) dispatchDelete(req *http.Request) (*http.Response, error) { + if d.srv.Delete == nil { + return nil, &nonRetriableError{errors.New("fake for method Delete not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/hostGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + hostGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("hostGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.Delete(req.Context(), resourceGroupNameParam, hostGroupNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusNoContent}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusNoContent", respContent.HTTPStatus)} + } + resp, err := server.NewResponse(respContent, req, nil) + if err != nil { + return nil, err + } + return resp, nil +} + +func (d *DedicatedHostGroupsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if d.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/hostGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + hostGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("hostGroupName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(armcompute.InstanceViewTypes(expandUnescaped)) + var options *armcompute.DedicatedHostGroupsClientGetOptions + if expandParam != nil { + options = &armcompute.DedicatedHostGroupsClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := d.srv.Get(req.Context(), resourceGroupNameParam, hostGroupNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).DedicatedHostGroup, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (d *DedicatedHostGroupsServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if d.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := d.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/hostGroups` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := d.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + d.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armcompute.DedicatedHostGroupsClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + d.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + d.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (d *DedicatedHostGroupsServerTransport) dispatchNewListBySubscriptionPager(req *http.Request) (*http.Response, error) { + if d.srv.NewListBySubscriptionPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListBySubscriptionPager not implemented")} + } + newListBySubscriptionPager := d.newListBySubscriptionPager.get(req) + if newListBySubscriptionPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/hostGroups` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := d.srv.NewListBySubscriptionPager(nil) + newListBySubscriptionPager = &resp + d.newListBySubscriptionPager.add(req, newListBySubscriptionPager) + server.PagerResponderInjectNextLinks(newListBySubscriptionPager, req, func(page *armcompute.DedicatedHostGroupsClientListBySubscriptionResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListBySubscriptionPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + d.newListBySubscriptionPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListBySubscriptionPager) { + d.newListBySubscriptionPager.remove(req) + } + return resp, nil +} + +func (d *DedicatedHostGroupsServerTransport) dispatchUpdate(req *http.Request) (*http.Response, error) { + if d.srv.Update == nil { + return nil, &nonRetriableError{errors.New("fake for method Update not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/hostGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.DedicatedHostGroupUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + hostGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("hostGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.Update(req.Context(), resourceGroupNameParam, hostGroupNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).DedicatedHostGroup, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/dedicatedhosts_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/dedicatedhosts_server.go new file mode 100644 index 00000000000..6fbe44013b6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/dedicatedhosts_server.go @@ -0,0 +1,506 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// DedicatedHostsServer is a fake server for instances of the armcompute.DedicatedHostsClient type. 
+type DedicatedHostsServer struct { + // BeginCreateOrUpdate is the fake for method DedicatedHostsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, parameters armcompute.DedicatedHost, options *armcompute.DedicatedHostsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armcompute.DedicatedHostsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method DedicatedHostsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *armcompute.DedicatedHostsClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.DedicatedHostsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method DedicatedHostsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *armcompute.DedicatedHostsClientGetOptions) (resp azfake.Responder[armcompute.DedicatedHostsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListAvailableSizesPager is the fake for method DedicatedHostsClient.NewListAvailableSizesPager + // HTTP status codes to indicate success: http.StatusOK + NewListAvailableSizesPager func(resourceGroupName string, hostGroupName string, hostName string, options *armcompute.DedicatedHostsClientListAvailableSizesOptions) (resp azfake.PagerResponder[armcompute.DedicatedHostsClientListAvailableSizesResponse]) + + // NewListByHostGroupPager is the fake for method DedicatedHostsClient.NewListByHostGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByHostGroupPager func(resourceGroupName string, hostGroupName string, options *armcompute.DedicatedHostsClientListByHostGroupOptions) (resp azfake.PagerResponder[armcompute.DedicatedHostsClientListByHostGroupResponse]) + + // BeginRedeploy is the fake for method DedicatedHostsClient.BeginRedeploy + // HTTP status codes to indicate success: http.StatusAccepted + BeginRedeploy func(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *armcompute.DedicatedHostsClientBeginRedeployOptions) (resp azfake.PollerResponder[armcompute.DedicatedHostsClientRedeployResponse], errResp azfake.ErrorResponder) + + // BeginRestart is the fake for method DedicatedHostsClient.BeginRestart + // HTTP status codes to indicate success: http.StatusOK + BeginRestart func(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *armcompute.DedicatedHostsClientBeginRestartOptions) (resp azfake.PollerResponder[armcompute.DedicatedHostsClientRestartResponse], errResp azfake.ErrorResponder) + + // BeginUpdate is the fake for method DedicatedHostsClient.BeginUpdate + // HTTP status codes to indicate success: http.StatusOK + BeginUpdate func(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, parameters armcompute.DedicatedHostUpdate, options *armcompute.DedicatedHostsClientBeginUpdateOptions) (resp azfake.PollerResponder[armcompute.DedicatedHostsClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewDedicatedHostsServerTransport creates a new instance of DedicatedHostsServerTransport with the provided 
implementation. +// The returned DedicatedHostsServerTransport instance is connected to an instance of armcompute.DedicatedHostsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewDedicatedHostsServerTransport(srv *DedicatedHostsServer) *DedicatedHostsServerTransport { + return &DedicatedHostsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armcompute.DedicatedHostsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armcompute.DedicatedHostsClientDeleteResponse]](), + newListAvailableSizesPager: newTracker[azfake.PagerResponder[armcompute.DedicatedHostsClientListAvailableSizesResponse]](), + newListByHostGroupPager: newTracker[azfake.PagerResponder[armcompute.DedicatedHostsClientListByHostGroupResponse]](), + beginRedeploy: newTracker[azfake.PollerResponder[armcompute.DedicatedHostsClientRedeployResponse]](), + beginRestart: newTracker[azfake.PollerResponder[armcompute.DedicatedHostsClientRestartResponse]](), + beginUpdate: newTracker[azfake.PollerResponder[armcompute.DedicatedHostsClientUpdateResponse]](), + } +} + +// DedicatedHostsServerTransport connects instances of armcompute.DedicatedHostsClient to instances of DedicatedHostsServer. +// Don't use this type directly, use NewDedicatedHostsServerTransport instead. +type DedicatedHostsServerTransport struct { + srv *DedicatedHostsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armcompute.DedicatedHostsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armcompute.DedicatedHostsClientDeleteResponse]] + newListAvailableSizesPager *tracker[azfake.PagerResponder[armcompute.DedicatedHostsClientListAvailableSizesResponse]] + newListByHostGroupPager *tracker[azfake.PagerResponder[armcompute.DedicatedHostsClientListByHostGroupResponse]] + beginRedeploy *tracker[azfake.PollerResponder[armcompute.DedicatedHostsClientRedeployResponse]] + beginRestart *tracker[azfake.PollerResponder[armcompute.DedicatedHostsClientRestartResponse]] + beginUpdate *tracker[azfake.PollerResponder[armcompute.DedicatedHostsClientUpdateResponse]] +} + +// Do implements the policy.Transporter interface for DedicatedHostsServerTransport. 
+func (d *DedicatedHostsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "DedicatedHostsClient.BeginCreateOrUpdate": + resp, err = d.dispatchBeginCreateOrUpdate(req) + case "DedicatedHostsClient.BeginDelete": + resp, err = d.dispatchBeginDelete(req) + case "DedicatedHostsClient.Get": + resp, err = d.dispatchGet(req) + case "DedicatedHostsClient.NewListAvailableSizesPager": + resp, err = d.dispatchNewListAvailableSizesPager(req) + case "DedicatedHostsClient.NewListByHostGroupPager": + resp, err = d.dispatchNewListByHostGroupPager(req) + case "DedicatedHostsClient.BeginRedeploy": + resp, err = d.dispatchBeginRedeploy(req) + case "DedicatedHostsClient.BeginRestart": + resp, err = d.dispatchBeginRestart(req) + case "DedicatedHostsClient.BeginUpdate": + resp, err = d.dispatchBeginUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (d *DedicatedHostsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if d.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := d.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/hostGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/hosts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.DedicatedHost](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + hostGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("hostGroupName")]) + if err != nil { + return nil, err + } + hostNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("hostName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, hostGroupNameParam, hostNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + d.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + d.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + d.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (d *DedicatedHostsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if d.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := d.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/hostGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/hosts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + hostGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("hostGroupName")]) + if err != nil { + return nil, err + } + hostNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("hostName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginDelete(req.Context(), resourceGroupNameParam, hostGroupNameParam, hostNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + d.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + d.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + d.beginDelete.remove(req) + } + + return resp, nil +} + +func (d *DedicatedHostsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if d.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/hostGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/hosts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + hostGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("hostGroupName")]) + if err != nil { + return nil, err + } + hostNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("hostName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(armcompute.InstanceViewTypes(expandUnescaped)) + var options *armcompute.DedicatedHostsClientGetOptions + if expandParam != nil { + options = &armcompute.DedicatedHostsClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := d.srv.Get(req.Context(), resourceGroupNameParam, hostGroupNameParam, hostNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).DedicatedHost, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (d *DedicatedHostsServerTransport) dispatchNewListAvailableSizesPager(req *http.Request) (*http.Response, error) { + if d.srv.NewListAvailableSizesPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAvailableSizesPager not implemented")} + } + newListAvailableSizesPager := d.newListAvailableSizesPager.get(req) + if newListAvailableSizesPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/hostGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/hosts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/hostSizes` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + hostGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("hostGroupName")]) + if err != nil { + return nil, err + } + hostNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("hostName")]) + if err != nil { + return nil, err + } + resp := d.srv.NewListAvailableSizesPager(resourceGroupNameParam, hostGroupNameParam, hostNameParam, nil) + newListAvailableSizesPager = &resp + d.newListAvailableSizesPager.add(req, newListAvailableSizesPager) + } + resp, err := server.PagerResponderNext(newListAvailableSizesPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + d.newListAvailableSizesPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAvailableSizesPager) { + d.newListAvailableSizesPager.remove(req) + } + return resp, nil +} + +func (d *DedicatedHostsServerTransport) dispatchNewListByHostGroupPager(req *http.Request) (*http.Response, error) { + if d.srv.NewListByHostGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByHostGroupPager not implemented")} + } + newListByHostGroupPager := d.newListByHostGroupPager.get(req) + if newListByHostGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/hostGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/hosts` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + hostGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("hostGroupName")]) + if err != nil { + return nil, err + } + resp := d.srv.NewListByHostGroupPager(resourceGroupNameParam, hostGroupNameParam, nil) + newListByHostGroupPager = &resp + d.newListByHostGroupPager.add(req, newListByHostGroupPager) + server.PagerResponderInjectNextLinks(newListByHostGroupPager, req, func(page *armcompute.DedicatedHostsClientListByHostGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByHostGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + d.newListByHostGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByHostGroupPager) { + d.newListByHostGroupPager.remove(req) + } + return resp, nil +} + +func (d *DedicatedHostsServerTransport) dispatchBeginRedeploy(req *http.Request) (*http.Response, error) { + if d.srv.BeginRedeploy == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginRedeploy not implemented")} + } + beginRedeploy := d.beginRedeploy.get(req) + if beginRedeploy == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/hostGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/hosts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/redeploy` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + hostGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("hostGroupName")]) + if err != nil { + return nil, err + } + hostNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("hostName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginRedeploy(req.Context(), resourceGroupNameParam, hostGroupNameParam, hostNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginRedeploy = &respr + d.beginRedeploy.add(req, beginRedeploy) + } + + resp, err := server.PollerResponderNext(beginRedeploy, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusAccepted}, resp.StatusCode) { + d.beginRedeploy.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginRedeploy) { + d.beginRedeploy.remove(req) + } + + return resp, nil +} + +func (d *DedicatedHostsServerTransport) dispatchBeginRestart(req *http.Request) (*http.Response, error) { + if d.srv.BeginRestart == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginRestart not implemented")} + } + beginRestart := d.beginRestart.get(req) + if beginRestart == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/hostGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/hosts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/restart` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + hostGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("hostGroupName")]) + if err != nil { + return nil, err + } + hostNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("hostName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginRestart(req.Context(), resourceGroupNameParam, hostGroupNameParam, hostNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginRestart = &respr + d.beginRestart.add(req, beginRestart) + } + + resp, err := server.PollerResponderNext(beginRestart, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK}, resp.StatusCode) { + d.beginRestart.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PollerResponderMore(beginRestart) { + d.beginRestart.remove(req) + } + + return resp, nil +} + +func (d *DedicatedHostsServerTransport) dispatchBeginUpdate(req *http.Request) (*http.Response, error) { + if d.srv.BeginUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdate not implemented")} + } + beginUpdate := d.beginUpdate.get(req) + if beginUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/hostGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/hosts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.DedicatedHostUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + hostGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("hostGroupName")]) + if err != nil { + return nil, err + } + hostNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("hostName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginUpdate(req.Context(), resourceGroupNameParam, hostGroupNameParam, hostNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdate = &respr + d.beginUpdate.add(req, beginUpdate) + } + + resp, err := server.PollerResponderNext(beginUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK}, resp.StatusCode) { + d.beginUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdate) { + d.beginUpdate.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/diskaccesses_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/diskaccesses_server.go new file mode 100644 index 00000000000..f3fd1f7b1ad --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/diskaccesses_server.go @@ -0,0 +1,600 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// DiskAccessesServer is a fake server for instances of the armcompute.DiskAccessesClient type. 
+type DiskAccessesServer struct { + // BeginCreateOrUpdate is the fake for method DiskAccessesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, diskAccessName string, diskAccess armcompute.DiskAccess, options *armcompute.DiskAccessesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armcompute.DiskAccessesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method DiskAccessesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, diskAccessName string, options *armcompute.DiskAccessesClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.DiskAccessesClientDeleteResponse], errResp azfake.ErrorResponder) + + // BeginDeleteAPrivateEndpointConnection is the fake for method DiskAccessesClient.BeginDeleteAPrivateEndpointConnection + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDeleteAPrivateEndpointConnection func(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string, options *armcompute.DiskAccessesClientBeginDeleteAPrivateEndpointConnectionOptions) (resp azfake.PollerResponder[armcompute.DiskAccessesClientDeleteAPrivateEndpointConnectionResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method DiskAccessesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, diskAccessName string, options *armcompute.DiskAccessesClientGetOptions) (resp azfake.Responder[armcompute.DiskAccessesClientGetResponse], errResp azfake.ErrorResponder) + + // GetAPrivateEndpointConnection is the fake for method DiskAccessesClient.GetAPrivateEndpointConnection + // HTTP status codes to indicate success: http.StatusOK + GetAPrivateEndpointConnection func(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string, options *armcompute.DiskAccessesClientGetAPrivateEndpointConnectionOptions) (resp azfake.Responder[armcompute.DiskAccessesClientGetAPrivateEndpointConnectionResponse], errResp azfake.ErrorResponder) + + // GetPrivateLinkResources is the fake for method DiskAccessesClient.GetPrivateLinkResources + // HTTP status codes to indicate success: http.StatusOK + GetPrivateLinkResources func(ctx context.Context, resourceGroupName string, diskAccessName string, options *armcompute.DiskAccessesClientGetPrivateLinkResourcesOptions) (resp azfake.Responder[armcompute.DiskAccessesClientGetPrivateLinkResourcesResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method DiskAccessesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armcompute.DiskAccessesClientListOptions) (resp azfake.PagerResponder[armcompute.DiskAccessesClientListResponse]) + + // NewListByResourceGroupPager is the fake for method DiskAccessesClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armcompute.DiskAccessesClientListByResourceGroupOptions) (resp azfake.PagerResponder[armcompute.DiskAccessesClientListByResourceGroupResponse]) + + // NewListPrivateEndpointConnectionsPager is the fake for method 
DiskAccessesClient.NewListPrivateEndpointConnectionsPager + // HTTP status codes to indicate success: http.StatusOK + NewListPrivateEndpointConnectionsPager func(resourceGroupName string, diskAccessName string, options *armcompute.DiskAccessesClientListPrivateEndpointConnectionsOptions) (resp azfake.PagerResponder[armcompute.DiskAccessesClientListPrivateEndpointConnectionsResponse]) + + // BeginUpdate is the fake for method DiskAccessesClient.BeginUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginUpdate func(ctx context.Context, resourceGroupName string, diskAccessName string, diskAccess armcompute.DiskAccessUpdate, options *armcompute.DiskAccessesClientBeginUpdateOptions) (resp azfake.PollerResponder[armcompute.DiskAccessesClientUpdateResponse], errResp azfake.ErrorResponder) + + // BeginUpdateAPrivateEndpointConnection is the fake for method DiskAccessesClient.BeginUpdateAPrivateEndpointConnection + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginUpdateAPrivateEndpointConnection func(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string, privateEndpointConnection armcompute.PrivateEndpointConnection, options *armcompute.DiskAccessesClientBeginUpdateAPrivateEndpointConnectionOptions) (resp azfake.PollerResponder[armcompute.DiskAccessesClientUpdateAPrivateEndpointConnectionResponse], errResp azfake.ErrorResponder) +} + +// NewDiskAccessesServerTransport creates a new instance of DiskAccessesServerTransport with the provided implementation. +// The returned DiskAccessesServerTransport instance is connected to an instance of armcompute.DiskAccessesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewDiskAccessesServerTransport(srv *DiskAccessesServer) *DiskAccessesServerTransport { + return &DiskAccessesServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armcompute.DiskAccessesClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armcompute.DiskAccessesClientDeleteResponse]](), + beginDeleteAPrivateEndpointConnection: newTracker[azfake.PollerResponder[armcompute.DiskAccessesClientDeleteAPrivateEndpointConnectionResponse]](), + newListPager: newTracker[azfake.PagerResponder[armcompute.DiskAccessesClientListResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armcompute.DiskAccessesClientListByResourceGroupResponse]](), + newListPrivateEndpointConnectionsPager: newTracker[azfake.PagerResponder[armcompute.DiskAccessesClientListPrivateEndpointConnectionsResponse]](), + beginUpdate: newTracker[azfake.PollerResponder[armcompute.DiskAccessesClientUpdateResponse]](), + beginUpdateAPrivateEndpointConnection: newTracker[azfake.PollerResponder[armcompute.DiskAccessesClientUpdateAPrivateEndpointConnectionResponse]](), + } +} + +// DiskAccessesServerTransport connects instances of armcompute.DiskAccessesClient to instances of DiskAccessesServer. +// Don't use this type directly, use NewDiskAccessesServerTransport instead. 
+type DiskAccessesServerTransport struct { + srv *DiskAccessesServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armcompute.DiskAccessesClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armcompute.DiskAccessesClientDeleteResponse]] + beginDeleteAPrivateEndpointConnection *tracker[azfake.PollerResponder[armcompute.DiskAccessesClientDeleteAPrivateEndpointConnectionResponse]] + newListPager *tracker[azfake.PagerResponder[armcompute.DiskAccessesClientListResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armcompute.DiskAccessesClientListByResourceGroupResponse]] + newListPrivateEndpointConnectionsPager *tracker[azfake.PagerResponder[armcompute.DiskAccessesClientListPrivateEndpointConnectionsResponse]] + beginUpdate *tracker[azfake.PollerResponder[armcompute.DiskAccessesClientUpdateResponse]] + beginUpdateAPrivateEndpointConnection *tracker[azfake.PollerResponder[armcompute.DiskAccessesClientUpdateAPrivateEndpointConnectionResponse]] +} + +// Do implements the policy.Transporter interface for DiskAccessesServerTransport. +func (d *DiskAccessesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "DiskAccessesClient.BeginCreateOrUpdate": + resp, err = d.dispatchBeginCreateOrUpdate(req) + case "DiskAccessesClient.BeginDelete": + resp, err = d.dispatchBeginDelete(req) + case "DiskAccessesClient.BeginDeleteAPrivateEndpointConnection": + resp, err = d.dispatchBeginDeleteAPrivateEndpointConnection(req) + case "DiskAccessesClient.Get": + resp, err = d.dispatchGet(req) + case "DiskAccessesClient.GetAPrivateEndpointConnection": + resp, err = d.dispatchGetAPrivateEndpointConnection(req) + case "DiskAccessesClient.GetPrivateLinkResources": + resp, err = d.dispatchGetPrivateLinkResources(req) + case "DiskAccessesClient.NewListPager": + resp, err = d.dispatchNewListPager(req) + case "DiskAccessesClient.NewListByResourceGroupPager": + resp, err = d.dispatchNewListByResourceGroupPager(req) + case "DiskAccessesClient.NewListPrivateEndpointConnectionsPager": + resp, err = d.dispatchNewListPrivateEndpointConnectionsPager(req) + case "DiskAccessesClient.BeginUpdate": + resp, err = d.dispatchBeginUpdate(req) + case "DiskAccessesClient.BeginUpdateAPrivateEndpointConnection": + resp, err = d.dispatchBeginUpdateAPrivateEndpointConnection(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (d *DiskAccessesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if d.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := d.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/diskAccesses/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := 
server.UnmarshalRequestAsJSON[armcompute.DiskAccess](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + diskAccessNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("diskAccessName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, diskAccessNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + d.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + d.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + d.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (d *DiskAccessesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if d.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := d.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/diskAccesses/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + diskAccessNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("diskAccessName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginDelete(req.Context(), resourceGroupNameParam, diskAccessNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + d.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + d.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + d.beginDelete.remove(req) + } + + return resp, nil +} + +func (d *DiskAccessesServerTransport) dispatchBeginDeleteAPrivateEndpointConnection(req *http.Request) (*http.Response, error) { + if d.srv.BeginDeleteAPrivateEndpointConnection == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDeleteAPrivateEndpointConnection not implemented")} + } + beginDeleteAPrivateEndpointConnection := d.beginDeleteAPrivateEndpointConnection.get(req) + if beginDeleteAPrivateEndpointConnection == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/diskAccesses/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/privateEndpointConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + diskAccessNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("diskAccessName")]) + if err != nil { + return nil, err + } + privateEndpointConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("privateEndpointConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginDeleteAPrivateEndpointConnection(req.Context(), resourceGroupNameParam, diskAccessNameParam, privateEndpointConnectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDeleteAPrivateEndpointConnection = &respr + d.beginDeleteAPrivateEndpointConnection.add(req, beginDeleteAPrivateEndpointConnection) + } + + resp, err := server.PollerResponderNext(beginDeleteAPrivateEndpointConnection, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + d.beginDeleteAPrivateEndpointConnection.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDeleteAPrivateEndpointConnection) { + d.beginDeleteAPrivateEndpointConnection.remove(req) + } + + return resp, nil +} + +func (d *DiskAccessesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if d.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/diskAccesses/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + diskAccessNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("diskAccessName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.Get(req.Context(), resourceGroupNameParam, diskAccessNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).DiskAccess, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (d *DiskAccessesServerTransport) dispatchGetAPrivateEndpointConnection(req *http.Request) (*http.Response, error) { + if d.srv.GetAPrivateEndpointConnection == nil { + return nil, &nonRetriableError{errors.New("fake for method GetAPrivateEndpointConnection not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/diskAccesses/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/privateEndpointConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + diskAccessNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("diskAccessName")]) + if err != nil { + return nil, err + } + privateEndpointConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("privateEndpointConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.GetAPrivateEndpointConnection(req.Context(), resourceGroupNameParam, diskAccessNameParam, privateEndpointConnectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).PrivateEndpointConnection, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (d *DiskAccessesServerTransport) dispatchGetPrivateLinkResources(req *http.Request) (*http.Response, error) { + if d.srv.GetPrivateLinkResources == nil { + return nil, &nonRetriableError{errors.New("fake for method GetPrivateLinkResources not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/diskAccesses/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/privateLinkResources` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + diskAccessNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("diskAccessName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.GetPrivateLinkResources(req.Context(), resourceGroupNameParam, diskAccessNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).PrivateLinkResourceListResult, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (d *DiskAccessesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if d.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := d.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/diskAccesses` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := d.srv.NewListPager(nil) + newListPager = &resp + d.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armcompute.DiskAccessesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + d.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + d.newListPager.remove(req) + } + return resp, nil +} + +func (d *DiskAccessesServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if d.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := d.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/diskAccesses` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := d.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + d.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armcompute.DiskAccessesClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + d.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + d.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (d *DiskAccessesServerTransport) dispatchNewListPrivateEndpointConnectionsPager(req *http.Request) (*http.Response, error) { + if d.srv.NewListPrivateEndpointConnectionsPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPrivateEndpointConnectionsPager not implemented")} + } + newListPrivateEndpointConnectionsPager := d.newListPrivateEndpointConnectionsPager.get(req) + if newListPrivateEndpointConnectionsPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/diskAccesses/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/privateEndpointConnections` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + diskAccessNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("diskAccessName")]) + if err != nil { + return nil, err + } + resp := d.srv.NewListPrivateEndpointConnectionsPager(resourceGroupNameParam, diskAccessNameParam, nil) + newListPrivateEndpointConnectionsPager = &resp + d.newListPrivateEndpointConnectionsPager.add(req, newListPrivateEndpointConnectionsPager) + server.PagerResponderInjectNextLinks(newListPrivateEndpointConnectionsPager, req, func(page *armcompute.DiskAccessesClientListPrivateEndpointConnectionsResponse, createLink func() 
string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPrivateEndpointConnectionsPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + d.newListPrivateEndpointConnectionsPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPrivateEndpointConnectionsPager) { + d.newListPrivateEndpointConnectionsPager.remove(req) + } + return resp, nil +} + +func (d *DiskAccessesServerTransport) dispatchBeginUpdate(req *http.Request) (*http.Response, error) { + if d.srv.BeginUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdate not implemented")} + } + beginUpdate := d.beginUpdate.get(req) + if beginUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/diskAccesses/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.DiskAccessUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + diskAccessNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("diskAccessName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginUpdate(req.Context(), resourceGroupNameParam, diskAccessNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdate = &respr + d.beginUpdate.add(req, beginUpdate) + } + + resp, err := server.PollerResponderNext(beginUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + d.beginUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdate) { + d.beginUpdate.remove(req) + } + + return resp, nil +} + +func (d *DiskAccessesServerTransport) dispatchBeginUpdateAPrivateEndpointConnection(req *http.Request) (*http.Response, error) { + if d.srv.BeginUpdateAPrivateEndpointConnection == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdateAPrivateEndpointConnection not implemented")} + } + beginUpdateAPrivateEndpointConnection := d.beginUpdateAPrivateEndpointConnection.get(req) + if beginUpdateAPrivateEndpointConnection == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/diskAccesses/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/privateEndpointConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.PrivateEndpointConnection](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + diskAccessNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("diskAccessName")]) + if err != nil { + return nil, err + } + privateEndpointConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("privateEndpointConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginUpdateAPrivateEndpointConnection(req.Context(), resourceGroupNameParam, diskAccessNameParam, privateEndpointConnectionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdateAPrivateEndpointConnection = &respr + d.beginUpdateAPrivateEndpointConnection.add(req, beginUpdateAPrivateEndpointConnection) + } + + resp, err := server.PollerResponderNext(beginUpdateAPrivateEndpointConnection, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + d.beginUpdateAPrivateEndpointConnection.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdateAPrivateEndpointConnection) { + d.beginUpdateAPrivateEndpointConnection.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/diskencryptionsets_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/diskencryptionsets_server.go new file mode 100644 index 00000000000..1ecd0d14e45 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/diskencryptionsets_server.go @@ -0,0 +1,402 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// DiskEncryptionSetsServer is a fake server for instances of the armcompute.DiskEncryptionSetsClient type. +type DiskEncryptionSetsServer struct { + // BeginCreateOrUpdate is the fake for method DiskEncryptionSetsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet armcompute.DiskEncryptionSet, options *armcompute.DiskEncryptionSetsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armcompute.DiskEncryptionSetsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method DiskEncryptionSetsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, options *armcompute.DiskEncryptionSetsClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.DiskEncryptionSetsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method DiskEncryptionSetsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, options *armcompute.DiskEncryptionSetsClientGetOptions) (resp azfake.Responder[armcompute.DiskEncryptionSetsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method DiskEncryptionSetsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armcompute.DiskEncryptionSetsClientListOptions) (resp azfake.PagerResponder[armcompute.DiskEncryptionSetsClientListResponse]) + + // NewListAssociatedResourcesPager is the fake for method DiskEncryptionSetsClient.NewListAssociatedResourcesPager + // HTTP status codes to indicate success: http.StatusOK + NewListAssociatedResourcesPager func(resourceGroupName string, diskEncryptionSetName string, options *armcompute.DiskEncryptionSetsClientListAssociatedResourcesOptions) (resp azfake.PagerResponder[armcompute.DiskEncryptionSetsClientListAssociatedResourcesResponse]) + + // NewListByResourceGroupPager is the fake for method DiskEncryptionSetsClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armcompute.DiskEncryptionSetsClientListByResourceGroupOptions) (resp azfake.PagerResponder[armcompute.DiskEncryptionSetsClientListByResourceGroupResponse]) + + // BeginUpdate is the fake for method DiskEncryptionSetsClient.BeginUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginUpdate func(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet armcompute.DiskEncryptionSetUpdate, options *armcompute.DiskEncryptionSetsClientBeginUpdateOptions) (resp azfake.PollerResponder[armcompute.DiskEncryptionSetsClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// 
NewDiskEncryptionSetsServerTransport creates a new instance of DiskEncryptionSetsServerTransport with the provided implementation. +// The returned DiskEncryptionSetsServerTransport instance is connected to an instance of armcompute.DiskEncryptionSetsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewDiskEncryptionSetsServerTransport(srv *DiskEncryptionSetsServer) *DiskEncryptionSetsServerTransport { + return &DiskEncryptionSetsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armcompute.DiskEncryptionSetsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armcompute.DiskEncryptionSetsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armcompute.DiskEncryptionSetsClientListResponse]](), + newListAssociatedResourcesPager: newTracker[azfake.PagerResponder[armcompute.DiskEncryptionSetsClientListAssociatedResourcesResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armcompute.DiskEncryptionSetsClientListByResourceGroupResponse]](), + beginUpdate: newTracker[azfake.PollerResponder[armcompute.DiskEncryptionSetsClientUpdateResponse]](), + } +} + +// DiskEncryptionSetsServerTransport connects instances of armcompute.DiskEncryptionSetsClient to instances of DiskEncryptionSetsServer. +// Don't use this type directly, use NewDiskEncryptionSetsServerTransport instead. +type DiskEncryptionSetsServerTransport struct { + srv *DiskEncryptionSetsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armcompute.DiskEncryptionSetsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armcompute.DiskEncryptionSetsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armcompute.DiskEncryptionSetsClientListResponse]] + newListAssociatedResourcesPager *tracker[azfake.PagerResponder[armcompute.DiskEncryptionSetsClientListAssociatedResourcesResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armcompute.DiskEncryptionSetsClientListByResourceGroupResponse]] + beginUpdate *tracker[azfake.PollerResponder[armcompute.DiskEncryptionSetsClientUpdateResponse]] +} + +// Do implements the policy.Transporter interface for DiskEncryptionSetsServerTransport. 
+func (d *DiskEncryptionSetsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "DiskEncryptionSetsClient.BeginCreateOrUpdate": + resp, err = d.dispatchBeginCreateOrUpdate(req) + case "DiskEncryptionSetsClient.BeginDelete": + resp, err = d.dispatchBeginDelete(req) + case "DiskEncryptionSetsClient.Get": + resp, err = d.dispatchGet(req) + case "DiskEncryptionSetsClient.NewListPager": + resp, err = d.dispatchNewListPager(req) + case "DiskEncryptionSetsClient.NewListAssociatedResourcesPager": + resp, err = d.dispatchNewListAssociatedResourcesPager(req) + case "DiskEncryptionSetsClient.NewListByResourceGroupPager": + resp, err = d.dispatchNewListByResourceGroupPager(req) + case "DiskEncryptionSetsClient.BeginUpdate": + resp, err = d.dispatchBeginUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (d *DiskEncryptionSetsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if d.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := d.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/diskEncryptionSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.DiskEncryptionSet](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + diskEncryptionSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("diskEncryptionSetName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, diskEncryptionSetNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + d.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + d.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + d.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (d *DiskEncryptionSetsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if d.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := d.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/diskEncryptionSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + diskEncryptionSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("diskEncryptionSetName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginDelete(req.Context(), resourceGroupNameParam, diskEncryptionSetNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + d.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + d.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + d.beginDelete.remove(req) + } + + return resp, nil +} + +func (d *DiskEncryptionSetsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if d.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/diskEncryptionSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + diskEncryptionSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("diskEncryptionSetName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.Get(req.Context(), resourceGroupNameParam, diskEncryptionSetNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).DiskEncryptionSet, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (d *DiskEncryptionSetsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if d.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := d.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/diskEncryptionSets` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := d.srv.NewListPager(nil) + newListPager = &resp + d.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armcompute.DiskEncryptionSetsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + d.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + d.newListPager.remove(req) + } + return resp, nil +} + +func (d *DiskEncryptionSetsServerTransport) dispatchNewListAssociatedResourcesPager(req *http.Request) (*http.Response, error) { + if d.srv.NewListAssociatedResourcesPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAssociatedResourcesPager not implemented")} + } + newListAssociatedResourcesPager := d.newListAssociatedResourcesPager.get(req) + if newListAssociatedResourcesPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/diskEncryptionSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/associatedResources` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + diskEncryptionSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("diskEncryptionSetName")]) + if err != nil { + return nil, err + } + resp := d.srv.NewListAssociatedResourcesPager(resourceGroupNameParam, diskEncryptionSetNameParam, nil) + newListAssociatedResourcesPager = &resp + d.newListAssociatedResourcesPager.add(req, newListAssociatedResourcesPager) + server.PagerResponderInjectNextLinks(newListAssociatedResourcesPager, req, func(page *armcompute.DiskEncryptionSetsClientListAssociatedResourcesResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAssociatedResourcesPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + d.newListAssociatedResourcesPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAssociatedResourcesPager) { + d.newListAssociatedResourcesPager.remove(req) + } + return resp, nil +} + +func (d *DiskEncryptionSetsServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if d.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := d.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/diskEncryptionSets` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := d.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + d.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armcompute.DiskEncryptionSetsClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + d.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + d.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (d *DiskEncryptionSetsServerTransport) dispatchBeginUpdate(req *http.Request) (*http.Response, error) { + if d.srv.BeginUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdate not implemented")} + } + beginUpdate := d.beginUpdate.get(req) + if beginUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/diskEncryptionSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.DiskEncryptionSetUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + diskEncryptionSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("diskEncryptionSetName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginUpdate(req.Context(), resourceGroupNameParam, diskEncryptionSetNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdate = &respr + d.beginUpdate.add(req, beginUpdate) + } + + resp, err := server.PollerResponderNext(beginUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + d.beginUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdate) { + d.beginUpdate.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/diskrestorepoint_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/diskrestorepoint_server.go new file mode 100644 index 00000000000..fc871e6124d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/diskrestorepoint_server.go @@ -0,0 +1,288 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// DiskRestorePointServer is a fake server for instances of the armcompute.DiskRestorePointClient type. 
+type DiskRestorePointServer struct { + // Get is the fake for method DiskRestorePointClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, restorePointCollectionName string, vmRestorePointName string, diskRestorePointName string, options *armcompute.DiskRestorePointClientGetOptions) (resp azfake.Responder[armcompute.DiskRestorePointClientGetResponse], errResp azfake.ErrorResponder) + + // BeginGrantAccess is the fake for method DiskRestorePointClient.BeginGrantAccess + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGrantAccess func(ctx context.Context, resourceGroupName string, restorePointCollectionName string, vmRestorePointName string, diskRestorePointName string, grantAccessData armcompute.GrantAccessData, options *armcompute.DiskRestorePointClientBeginGrantAccessOptions) (resp azfake.PollerResponder[armcompute.DiskRestorePointClientGrantAccessResponse], errResp azfake.ErrorResponder) + + // NewListByRestorePointPager is the fake for method DiskRestorePointClient.NewListByRestorePointPager + // HTTP status codes to indicate success: http.StatusOK + NewListByRestorePointPager func(resourceGroupName string, restorePointCollectionName string, vmRestorePointName string, options *armcompute.DiskRestorePointClientListByRestorePointOptions) (resp azfake.PagerResponder[armcompute.DiskRestorePointClientListByRestorePointResponse]) + + // BeginRevokeAccess is the fake for method DiskRestorePointClient.BeginRevokeAccess + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginRevokeAccess func(ctx context.Context, resourceGroupName string, restorePointCollectionName string, vmRestorePointName string, diskRestorePointName string, options *armcompute.DiskRestorePointClientBeginRevokeAccessOptions) (resp azfake.PollerResponder[armcompute.DiskRestorePointClientRevokeAccessResponse], errResp azfake.ErrorResponder) +} + +// NewDiskRestorePointServerTransport creates a new instance of DiskRestorePointServerTransport with the provided implementation. +// The returned DiskRestorePointServerTransport instance is connected to an instance of armcompute.DiskRestorePointClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewDiskRestorePointServerTransport(srv *DiskRestorePointServer) *DiskRestorePointServerTransport { + return &DiskRestorePointServerTransport{ + srv: srv, + beginGrantAccess: newTracker[azfake.PollerResponder[armcompute.DiskRestorePointClientGrantAccessResponse]](), + newListByRestorePointPager: newTracker[azfake.PagerResponder[armcompute.DiskRestorePointClientListByRestorePointResponse]](), + beginRevokeAccess: newTracker[azfake.PollerResponder[armcompute.DiskRestorePointClientRevokeAccessResponse]](), + } +} + +// DiskRestorePointServerTransport connects instances of armcompute.DiskRestorePointClient to instances of DiskRestorePointServer. +// Don't use this type directly, use NewDiskRestorePointServerTransport instead. 
+type DiskRestorePointServerTransport struct { + srv *DiskRestorePointServer + beginGrantAccess *tracker[azfake.PollerResponder[armcompute.DiskRestorePointClientGrantAccessResponse]] + newListByRestorePointPager *tracker[azfake.PagerResponder[armcompute.DiskRestorePointClientListByRestorePointResponse]] + beginRevokeAccess *tracker[azfake.PollerResponder[armcompute.DiskRestorePointClientRevokeAccessResponse]] +} + +// Do implements the policy.Transporter interface for DiskRestorePointServerTransport. +func (d *DiskRestorePointServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "DiskRestorePointClient.Get": + resp, err = d.dispatchGet(req) + case "DiskRestorePointClient.BeginGrantAccess": + resp, err = d.dispatchBeginGrantAccess(req) + case "DiskRestorePointClient.NewListByRestorePointPager": + resp, err = d.dispatchNewListByRestorePointPager(req) + case "DiskRestorePointClient.BeginRevokeAccess": + resp, err = d.dispatchBeginRevokeAccess(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (d *DiskRestorePointServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if d.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/restorePointCollections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/restorePoints/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/diskRestorePoints/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + restorePointCollectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("restorePointCollectionName")]) + if err != nil { + return nil, err + } + vmRestorePointNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmRestorePointName")]) + if err != nil { + return nil, err + } + diskRestorePointNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("diskRestorePointName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.Get(req.Context(), resourceGroupNameParam, restorePointCollectionNameParam, vmRestorePointNameParam, diskRestorePointNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).DiskRestorePoint, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (d *DiskRestorePointServerTransport) dispatchBeginGrantAccess(req *http.Request) (*http.Response, error) { + if d.srv.BeginGrantAccess == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGrantAccess not implemented")} + } + beginGrantAccess := d.beginGrantAccess.get(req) + if beginGrantAccess == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/restorePointCollections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/restorePoints/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/diskRestorePoints/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/beginGetAccess` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.GrantAccessData](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + restorePointCollectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("restorePointCollectionName")]) + if err != nil { + return nil, err + } + vmRestorePointNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmRestorePointName")]) + if err != nil { + return nil, err + } + diskRestorePointNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("diskRestorePointName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginGrantAccess(req.Context(), resourceGroupNameParam, restorePointCollectionNameParam, vmRestorePointNameParam, diskRestorePointNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGrantAccess = &respr + d.beginGrantAccess.add(req, beginGrantAccess) + } + + resp, err := server.PollerResponderNext(beginGrantAccess, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + d.beginGrantAccess.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGrantAccess) { + d.beginGrantAccess.remove(req) + } + + return resp, nil +} + +func (d *DiskRestorePointServerTransport) dispatchNewListByRestorePointPager(req *http.Request) (*http.Response, error) { + if d.srv.NewListByRestorePointPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByRestorePointPager not implemented")} + } + newListByRestorePointPager := d.newListByRestorePointPager.get(req) + if newListByRestorePointPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/restorePointCollections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/restorePoints/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/diskRestorePoints` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + restorePointCollectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("restorePointCollectionName")]) + if err != nil { + return nil, err + } + vmRestorePointNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmRestorePointName")]) + if err != nil { + return nil, err + } + resp := d.srv.NewListByRestorePointPager(resourceGroupNameParam, restorePointCollectionNameParam, vmRestorePointNameParam, nil) + newListByRestorePointPager = &resp + d.newListByRestorePointPager.add(req, newListByRestorePointPager) + server.PagerResponderInjectNextLinks(newListByRestorePointPager, req, func(page *armcompute.DiskRestorePointClientListByRestorePointResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByRestorePointPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + d.newListByRestorePointPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByRestorePointPager) { + d.newListByRestorePointPager.remove(req) + } + return resp, nil +} + +func (d *DiskRestorePointServerTransport) dispatchBeginRevokeAccess(req *http.Request) (*http.Response, error) { + if d.srv.BeginRevokeAccess == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginRevokeAccess not implemented")} + } + beginRevokeAccess := d.beginRevokeAccess.get(req) + if beginRevokeAccess == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/restorePointCollections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/restorePoints/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/diskRestorePoints/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/endGetAccess` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + restorePointCollectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("restorePointCollectionName")]) + if err != nil { + return nil, err + } + vmRestorePointNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmRestorePointName")]) + if err != nil { + return nil, err + } + diskRestorePointNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("diskRestorePointName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginRevokeAccess(req.Context(), resourceGroupNameParam, restorePointCollectionNameParam, vmRestorePointNameParam, diskRestorePointNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginRevokeAccess = &respr + d.beginRevokeAccess.add(req, beginRevokeAccess) + } + + resp, err := server.PollerResponderNext(beginRevokeAccess, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + d.beginRevokeAccess.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginRevokeAccess) { + d.beginRevokeAccess.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/disks_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/disks_server.go new file mode 100644 index 00000000000..6084b3568da --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/disks_server.go @@ -0,0 +1,461 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// DisksServer is a fake server for instances of the armcompute.DisksClient type. +type DisksServer struct { + // BeginCreateOrUpdate is the fake for method DisksClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, diskName string, disk armcompute.Disk, options *armcompute.DisksClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armcompute.DisksClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method DisksClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, diskName string, options *armcompute.DisksClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.DisksClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method DisksClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, diskName string, options *armcompute.DisksClientGetOptions) (resp azfake.Responder[armcompute.DisksClientGetResponse], errResp azfake.ErrorResponder) + + // BeginGrantAccess is the fake for method DisksClient.BeginGrantAccess + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGrantAccess func(ctx context.Context, resourceGroupName string, diskName string, grantAccessData armcompute.GrantAccessData, options *armcompute.DisksClientBeginGrantAccessOptions) (resp azfake.PollerResponder[armcompute.DisksClientGrantAccessResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method DisksClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armcompute.DisksClientListOptions) (resp azfake.PagerResponder[armcompute.DisksClientListResponse]) + + // NewListByResourceGroupPager is the fake for method DisksClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armcompute.DisksClientListByResourceGroupOptions) (resp azfake.PagerResponder[armcompute.DisksClientListByResourceGroupResponse]) + + // BeginRevokeAccess is the fake for method DisksClient.BeginRevokeAccess + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginRevokeAccess func(ctx context.Context, resourceGroupName string, diskName string, options *armcompute.DisksClientBeginRevokeAccessOptions) (resp azfake.PollerResponder[armcompute.DisksClientRevokeAccessResponse], errResp azfake.ErrorResponder) + + // BeginUpdate is the fake for method DisksClient.BeginUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginUpdate func(ctx context.Context, resourceGroupName string, diskName string, disk armcompute.DiskUpdate, options *armcompute.DisksClientBeginUpdateOptions) (resp azfake.PollerResponder[armcompute.DisksClientUpdateResponse], errResp azfake.ErrorResponder) +} + 
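The doc comments in this generated file describe the intended wiring: a test implements only the DisksServer callbacks it needs and routes the real armcompute.DisksClient through the generated transport via the azcore.ClientOptions.Transporter field. The following is a minimal usage sketch of that pattern, not part of the vendored file; it assumes the azfake package's Responder.SetResponse helper and azfake.TokenCredential behave as in the azcore fake package, and the subscription ID, resource group, and disk names are placeholders.

package fake_test

import (
	"context"
	"net/http"
	"testing"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake"
)

// Illustrative test (hypothetical name and values) showing how DisksServer
// and NewDisksServerTransport are expected to be wired together.
func TestDisksClientGetAgainstFake(t *testing.T) {
	// Fake only the operation under test; the generated transport returns a
	// non-retriable "not implemented" error for any other method.
	srv := fake.DisksServer{
		Get: func(ctx context.Context, resourceGroupName, diskName string, options *armcompute.DisksClientGetOptions) (resp azfake.Responder[armcompute.DisksClientGetResponse], errResp azfake.ErrorResponder) {
			// Echo the requested disk name back with a 200 response.
			resp.SetResponse(http.StatusOK, armcompute.DisksClientGetResponse{
				Disk: armcompute.Disk{Name: to.Ptr(diskName)},
			}, nil)
			return
		},
	}

	// Route the real client through the fake transport instead of the network.
	client, err := armcompute.NewDisksClient(
		"00000000-0000-0000-0000-000000000000", // placeholder subscription ID
		&azfake.TokenCredential{},
		&arm.ClientOptions{ClientOptions: azcore.ClientOptions{
			Transporter: fake.NewDisksServerTransport(&srv),
		}},
	)
	if err != nil {
		t.Fatal(err)
	}

	res, err := client.Get(context.Background(), "test-rg", "test-disk", nil) // placeholder names
	if err != nil {
		t.Fatal(err)
	}
	if res.Name == nil || *res.Name != "test-disk" {
		t.Fatalf("unexpected disk name %v", res.Name)
	}
}

Because unimplemented callbacks make the dispatcher return a "fake for method ... not implemented" non-retriable error (as in the dispatch functions below), each test only needs to stub the operations it actually exercises.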
+// NewDisksServerTransport creates a new instance of DisksServerTransport with the provided implementation. +// The returned DisksServerTransport instance is connected to an instance of armcompute.DisksClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewDisksServerTransport(srv *DisksServer) *DisksServerTransport { + return &DisksServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armcompute.DisksClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armcompute.DisksClientDeleteResponse]](), + beginGrantAccess: newTracker[azfake.PollerResponder[armcompute.DisksClientGrantAccessResponse]](), + newListPager: newTracker[azfake.PagerResponder[armcompute.DisksClientListResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armcompute.DisksClientListByResourceGroupResponse]](), + beginRevokeAccess: newTracker[azfake.PollerResponder[armcompute.DisksClientRevokeAccessResponse]](), + beginUpdate: newTracker[azfake.PollerResponder[armcompute.DisksClientUpdateResponse]](), + } +} + +// DisksServerTransport connects instances of armcompute.DisksClient to instances of DisksServer. +// Don't use this type directly, use NewDisksServerTransport instead. +type DisksServerTransport struct { + srv *DisksServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armcompute.DisksClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armcompute.DisksClientDeleteResponse]] + beginGrantAccess *tracker[azfake.PollerResponder[armcompute.DisksClientGrantAccessResponse]] + newListPager *tracker[azfake.PagerResponder[armcompute.DisksClientListResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armcompute.DisksClientListByResourceGroupResponse]] + beginRevokeAccess *tracker[azfake.PollerResponder[armcompute.DisksClientRevokeAccessResponse]] + beginUpdate *tracker[azfake.PollerResponder[armcompute.DisksClientUpdateResponse]] +} + +// Do implements the policy.Transporter interface for DisksServerTransport. 
+func (d *DisksServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "DisksClient.BeginCreateOrUpdate": + resp, err = d.dispatchBeginCreateOrUpdate(req) + case "DisksClient.BeginDelete": + resp, err = d.dispatchBeginDelete(req) + case "DisksClient.Get": + resp, err = d.dispatchGet(req) + case "DisksClient.BeginGrantAccess": + resp, err = d.dispatchBeginGrantAccess(req) + case "DisksClient.NewListPager": + resp, err = d.dispatchNewListPager(req) + case "DisksClient.NewListByResourceGroupPager": + resp, err = d.dispatchNewListByResourceGroupPager(req) + case "DisksClient.BeginRevokeAccess": + resp, err = d.dispatchBeginRevokeAccess(req) + case "DisksClient.BeginUpdate": + resp, err = d.dispatchBeginUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (d *DisksServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if d.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := d.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/disks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.Disk](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + diskNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("diskName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, diskNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + d.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + d.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + d.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (d *DisksServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if d.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := d.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/disks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + diskNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("diskName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginDelete(req.Context(), resourceGroupNameParam, diskNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + d.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + d.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + d.beginDelete.remove(req) + } + + return resp, nil +} + +func (d *DisksServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if d.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/disks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + diskNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("diskName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.Get(req.Context(), resourceGroupNameParam, diskNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Disk, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (d *DisksServerTransport) dispatchBeginGrantAccess(req *http.Request) (*http.Response, error) { + if d.srv.BeginGrantAccess == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGrantAccess not implemented")} + } + beginGrantAccess := d.beginGrantAccess.get(req) + if beginGrantAccess == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/disks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/beginGetAccess` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.GrantAccessData](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + diskNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("diskName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginGrantAccess(req.Context(), resourceGroupNameParam, diskNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGrantAccess = &respr + d.beginGrantAccess.add(req, beginGrantAccess) + } + + resp, err := server.PollerResponderNext(beginGrantAccess, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + d.beginGrantAccess.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGrantAccess) { + d.beginGrantAccess.remove(req) + } + + return resp, nil +} + +func (d *DisksServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if d.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := d.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/disks` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := d.srv.NewListPager(nil) + newListPager = &resp + d.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armcompute.DisksClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + d.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + d.newListPager.remove(req) + } + return resp, nil +} + +func (d *DisksServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if d.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := d.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/disks` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := d.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + d.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armcompute.DisksClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + d.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + d.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (d *DisksServerTransport) dispatchBeginRevokeAccess(req *http.Request) (*http.Response, error) { + if d.srv.BeginRevokeAccess == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginRevokeAccess not implemented")} + } + beginRevokeAccess := d.beginRevokeAccess.get(req) + if beginRevokeAccess == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/disks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/endGetAccess` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + diskNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("diskName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginRevokeAccess(req.Context(), resourceGroupNameParam, diskNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginRevokeAccess = &respr + d.beginRevokeAccess.add(req, beginRevokeAccess) + } + + resp, err := server.PollerResponderNext(beginRevokeAccess, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + d.beginRevokeAccess.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginRevokeAccess) { + d.beginRevokeAccess.remove(req) + } + + return resp, nil +} + +func (d *DisksServerTransport) dispatchBeginUpdate(req *http.Request) (*http.Response, error) { + if d.srv.BeginUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdate not implemented")} + } + beginUpdate := d.beginUpdate.get(req) + if beginUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/disks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.DiskUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + diskNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("diskName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginUpdate(req.Context(), resourceGroupNameParam, diskNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdate = &respr + d.beginUpdate.add(req, beginUpdate) + } + + resp, err := server.PollerResponderNext(beginUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + d.beginUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdate) { + d.beginUpdate.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/galleries_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/galleries_server.go new file mode 100644 index 00000000000..f4a31f0f6a2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/galleries_server.go @@ -0,0 +1,371 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// GalleriesServer is a fake server for instances of the armcompute.GalleriesClient type. 
+type GalleriesServer struct { + // BeginCreateOrUpdate is the fake for method GalleriesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated, http.StatusAccepted + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, galleryName string, gallery armcompute.Gallery, options *armcompute.GalleriesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armcompute.GalleriesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method GalleriesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, galleryName string, options *armcompute.GalleriesClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.GalleriesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method GalleriesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, galleryName string, options *armcompute.GalleriesClientGetOptions) (resp azfake.Responder[armcompute.GalleriesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method GalleriesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armcompute.GalleriesClientListOptions) (resp azfake.PagerResponder[armcompute.GalleriesClientListResponse]) + + // NewListByResourceGroupPager is the fake for method GalleriesClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armcompute.GalleriesClientListByResourceGroupOptions) (resp azfake.PagerResponder[armcompute.GalleriesClientListByResourceGroupResponse]) + + // BeginUpdate is the fake for method GalleriesClient.BeginUpdate + // HTTP status codes to indicate success: http.StatusOK + BeginUpdate func(ctx context.Context, resourceGroupName string, galleryName string, gallery armcompute.GalleryUpdate, options *armcompute.GalleriesClientBeginUpdateOptions) (resp azfake.PollerResponder[armcompute.GalleriesClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewGalleriesServerTransport creates a new instance of GalleriesServerTransport with the provided implementation. +// The returned GalleriesServerTransport instance is connected to an instance of armcompute.GalleriesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewGalleriesServerTransport(srv *GalleriesServer) *GalleriesServerTransport { + return &GalleriesServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armcompute.GalleriesClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armcompute.GalleriesClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armcompute.GalleriesClientListResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armcompute.GalleriesClientListByResourceGroupResponse]](), + beginUpdate: newTracker[azfake.PollerResponder[armcompute.GalleriesClientUpdateResponse]](), + } +} + +// GalleriesServerTransport connects instances of armcompute.GalleriesClient to instances of GalleriesServer. +// Don't use this type directly, use NewGalleriesServerTransport instead. 
+type GalleriesServerTransport struct { + srv *GalleriesServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armcompute.GalleriesClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armcompute.GalleriesClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armcompute.GalleriesClientListResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armcompute.GalleriesClientListByResourceGroupResponse]] + beginUpdate *tracker[azfake.PollerResponder[armcompute.GalleriesClientUpdateResponse]] +} + +// Do implements the policy.Transporter interface for GalleriesServerTransport. +func (g *GalleriesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "GalleriesClient.BeginCreateOrUpdate": + resp, err = g.dispatchBeginCreateOrUpdate(req) + case "GalleriesClient.BeginDelete": + resp, err = g.dispatchBeginDelete(req) + case "GalleriesClient.Get": + resp, err = g.dispatchGet(req) + case "GalleriesClient.NewListPager": + resp, err = g.dispatchNewListPager(req) + case "GalleriesClient.NewListByResourceGroupPager": + resp, err = g.dispatchNewListByResourceGroupPager(req) + case "GalleriesClient.BeginUpdate": + resp, err = g.dispatchBeginUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (g *GalleriesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if g.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := g.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.Gallery](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + respr, errRespr := g.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, galleryNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + g.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated, http.StatusAccepted}, resp.StatusCode) { + g.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + g.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (g *GalleriesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if g.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := g.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + respr, errRespr := g.srv.BeginDelete(req.Context(), resourceGroupNameParam, galleryNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + g.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + g.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + g.beginDelete.remove(req) + } + + return resp, nil +} + +func (g *GalleriesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if g.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + selectUnescaped, err := url.QueryUnescape(qp.Get("$select")) + if err != nil { + return nil, err + } + selectParam := getOptional(armcompute.SelectPermissions(selectUnescaped)) + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(armcompute.GalleryExpandParams(expandUnescaped)) + var options *armcompute.GalleriesClientGetOptions + if selectParam != nil || expandParam != nil { + options = &armcompute.GalleriesClientGetOptions{ + Select: selectParam, + Expand: expandParam, + } + } + respr, errRespr := g.srv.Get(req.Context(), resourceGroupNameParam, galleryNameParam, options) + if respErr := 
server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Gallery, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (g *GalleriesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if g.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := g.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := g.srv.NewListPager(nil) + newListPager = &resp + g.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armcompute.GalleriesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + g.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + g.newListPager.remove(req) + } + return resp, nil +} + +func (g *GalleriesServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if g.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := g.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := g.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + g.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armcompute.GalleriesClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + g.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + g.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (g *GalleriesServerTransport) dispatchBeginUpdate(req *http.Request) (*http.Response, error) { + if g.srv.BeginUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdate not implemented")} + } + beginUpdate := g.beginUpdate.get(req) + if beginUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.GalleryUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + respr, errRespr := g.srv.BeginUpdate(req.Context(), resourceGroupNameParam, galleryNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdate = &respr + g.beginUpdate.add(req, beginUpdate) + } + + resp, err := server.PollerResponderNext(beginUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK}, resp.StatusCode) { + g.beginUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdate) { + g.beginUpdate.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/galleryapplications_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/galleryapplications_server.go new file mode 100644 index 00000000000..9e09cb3690c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/galleryapplications_server.go @@ -0,0 +1,332 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// GalleryApplicationsServer is a fake server for instances of the armcompute.GalleryApplicationsClient type. 
+type GalleryApplicationsServer struct { + // BeginCreateOrUpdate is the fake for method GalleryApplicationsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated, http.StatusAccepted + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication armcompute.GalleryApplication, options *armcompute.GalleryApplicationsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armcompute.GalleryApplicationsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method GalleryApplicationsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, options *armcompute.GalleryApplicationsClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.GalleryApplicationsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method GalleryApplicationsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, options *armcompute.GalleryApplicationsClientGetOptions) (resp azfake.Responder[armcompute.GalleryApplicationsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListByGalleryPager is the fake for method GalleryApplicationsClient.NewListByGalleryPager + // HTTP status codes to indicate success: http.StatusOK + NewListByGalleryPager func(resourceGroupName string, galleryName string, options *armcompute.GalleryApplicationsClientListByGalleryOptions) (resp azfake.PagerResponder[armcompute.GalleryApplicationsClientListByGalleryResponse]) + + // BeginUpdate is the fake for method GalleryApplicationsClient.BeginUpdate + // HTTP status codes to indicate success: http.StatusOK + BeginUpdate func(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication armcompute.GalleryApplicationUpdate, options *armcompute.GalleryApplicationsClientBeginUpdateOptions) (resp azfake.PollerResponder[armcompute.GalleryApplicationsClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewGalleryApplicationsServerTransport creates a new instance of GalleryApplicationsServerTransport with the provided implementation. +// The returned GalleryApplicationsServerTransport instance is connected to an instance of armcompute.GalleryApplicationsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewGalleryApplicationsServerTransport(srv *GalleryApplicationsServer) *GalleryApplicationsServerTransport { + return &GalleryApplicationsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armcompute.GalleryApplicationsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armcompute.GalleryApplicationsClientDeleteResponse]](), + newListByGalleryPager: newTracker[azfake.PagerResponder[armcompute.GalleryApplicationsClientListByGalleryResponse]](), + beginUpdate: newTracker[azfake.PollerResponder[armcompute.GalleryApplicationsClientUpdateResponse]](), + } +} + +// GalleryApplicationsServerTransport connects instances of armcompute.GalleryApplicationsClient to instances of GalleryApplicationsServer. 
+// Don't use this type directly, use NewGalleryApplicationsServerTransport instead. +type GalleryApplicationsServerTransport struct { + srv *GalleryApplicationsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armcompute.GalleryApplicationsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armcompute.GalleryApplicationsClientDeleteResponse]] + newListByGalleryPager *tracker[azfake.PagerResponder[armcompute.GalleryApplicationsClientListByGalleryResponse]] + beginUpdate *tracker[azfake.PollerResponder[armcompute.GalleryApplicationsClientUpdateResponse]] +} + +// Do implements the policy.Transporter interface for GalleryApplicationsServerTransport. +func (g *GalleryApplicationsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "GalleryApplicationsClient.BeginCreateOrUpdate": + resp, err = g.dispatchBeginCreateOrUpdate(req) + case "GalleryApplicationsClient.BeginDelete": + resp, err = g.dispatchBeginDelete(req) + case "GalleryApplicationsClient.Get": + resp, err = g.dispatchGet(req) + case "GalleryApplicationsClient.NewListByGalleryPager": + resp, err = g.dispatchNewListByGalleryPager(req) + case "GalleryApplicationsClient.BeginUpdate": + resp, err = g.dispatchBeginUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (g *GalleryApplicationsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if g.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := g.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P<galleryName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/applications/(?P<galleryApplicationName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.GalleryApplication](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + galleryApplicationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryApplicationName")]) + if err != nil { + return nil, err + } + respr, errRespr := g.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, galleryNameParam, galleryApplicationNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + g.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated, http.StatusAccepted}, resp.StatusCode) { 
g.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + g.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (g *GalleryApplicationsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if g.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := g.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P<galleryName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/applications/(?P<galleryApplicationName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + galleryApplicationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryApplicationName")]) + if err != nil { + return nil, err + } + respr, errRespr := g.srv.BeginDelete(req.Context(), resourceGroupNameParam, galleryNameParam, galleryApplicationNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + g.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + g.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + g.beginDelete.remove(req) + } + + return resp, nil +} + +func (g *GalleryApplicationsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if g.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P<galleryName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/applications/(?P<galleryApplicationName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + galleryApplicationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryApplicationName")]) + if err != nil { + return nil, err + } + respr, errRespr := g.srv.Get(req.Context(), resourceGroupNameParam, galleryNameParam, galleryApplicationNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).GalleryApplication, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (g *GalleryApplicationsServerTransport) dispatchNewListByGalleryPager(req *http.Request) (*http.Response, error) { + if g.srv.NewListByGalleryPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByGalleryPager not implemented")} + } + newListByGalleryPager := g.newListByGalleryPager.get(req) + if newListByGalleryPager == nil { + const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P<galleryName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/applications` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + resp := g.srv.NewListByGalleryPager(resourceGroupNameParam, galleryNameParam, nil) + newListByGalleryPager = &resp + g.newListByGalleryPager.add(req, newListByGalleryPager) + server.PagerResponderInjectNextLinks(newListByGalleryPager, req, func(page *armcompute.GalleryApplicationsClientListByGalleryResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByGalleryPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { 
g.newListByGalleryPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByGalleryPager) { + g.newListByGalleryPager.remove(req) + } + return resp, nil +} + +func (g *GalleryApplicationsServerTransport) dispatchBeginUpdate(req *http.Request) (*http.Response, error) { + if g.srv.BeginUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdate not implemented")} + } + beginUpdate := g.beginUpdate.get(req) + if beginUpdate == nil { + const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P<galleryName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/applications/(?P<galleryApplicationName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.GalleryApplicationUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + galleryApplicationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryApplicationName")]) + if err != nil { + return nil, err + } + respr, errRespr := g.srv.BeginUpdate(req.Context(), resourceGroupNameParam, galleryNameParam, galleryApplicationNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdate = &respr + g.beginUpdate.add(req, beginUpdate) + } + + resp, err := server.PollerResponderNext(beginUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK}, resp.StatusCode) { + g.beginUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdate) { + g.beginUpdate.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/galleryapplicationversions_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/galleryapplicationversions_server.go new file mode 100644 index 00000000000..c272b3a31b0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/galleryapplicationversions_server.go @@ -0,0 +1,364 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated.
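Editorial note, not part of the vendored diff: the doc comments in galleryapplications_server.go above describe the intended wiring, where a test fills in only the callbacks it needs and routes the real armcompute client through the fake transport via azcore.ClientOptions.Transporter. A minimal, hypothetical sketch along those lines; the test name, subscription ID, and resource names are placeholders.

package fake_test

import (
	"context"
	"net/http"
	"testing"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake"
)

func TestGalleryApplicationsGet(t *testing.T) {
	// Fake only the operation the test needs; methods left nil are reported
	// as "not implemented" by the dispatchers above.
	srv := fake.GalleryApplicationsServer{
		Get: func(ctx context.Context, resourceGroupName, galleryName, galleryApplicationName string, options *armcompute.GalleryApplicationsClientGetOptions) (resp azfake.Responder[armcompute.GalleryApplicationsClientGetResponse], errResp azfake.ErrorResponder) {
			// Echo the requested name back with the 200 the dispatcher accepts.
			resp.SetResponse(http.StatusOK, armcompute.GalleryApplicationsClientGetResponse{
				GalleryApplication: armcompute.GalleryApplication{Name: to.Ptr(galleryApplicationName)},
			}, nil)
			return
		},
	}
	// Route the real client through the fake transport instead of ARM.
	client, err := armcompute.NewGalleryApplicationsClient("subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transporter: fake.NewGalleryApplicationsServerTransport(&srv)},
	})
	if err != nil {
		t.Fatal(err)
	}
	res, err := client.Get(context.Background(), "myResourceGroup", "myGallery", "myApp", nil)
	if err != nil {
		t.Fatal(err)
	}
	if *res.Name != "myApp" {
		t.Fatalf("unexpected gallery application name %q", *res.Name)
	}
}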
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// GalleryApplicationVersionsServer is a fake server for instances of the armcompute.GalleryApplicationVersionsClient type. +type GalleryApplicationVersionsServer struct { + // BeginCreateOrUpdate is the fake for method GalleryApplicationVersionsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated, http.StatusAccepted + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion armcompute.GalleryApplicationVersion, options *armcompute.GalleryApplicationVersionsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armcompute.GalleryApplicationVersionsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method GalleryApplicationVersionsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, options *armcompute.GalleryApplicationVersionsClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.GalleryApplicationVersionsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method GalleryApplicationVersionsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, options *armcompute.GalleryApplicationVersionsClientGetOptions) (resp azfake.Responder[armcompute.GalleryApplicationVersionsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListByGalleryApplicationPager is the fake for method GalleryApplicationVersionsClient.NewListByGalleryApplicationPager + // HTTP status codes to indicate success: http.StatusOK + NewListByGalleryApplicationPager func(resourceGroupName string, galleryName string, galleryApplicationName string, options *armcompute.GalleryApplicationVersionsClientListByGalleryApplicationOptions) (resp azfake.PagerResponder[armcompute.GalleryApplicationVersionsClientListByGalleryApplicationResponse]) + + // BeginUpdate is the fake for method GalleryApplicationVersionsClient.BeginUpdate + // HTTP status codes to indicate success: http.StatusOK + BeginUpdate func(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion armcompute.GalleryApplicationVersionUpdate, options *armcompute.GalleryApplicationVersionsClientBeginUpdateOptions) (resp azfake.PollerResponder[armcompute.GalleryApplicationVersionsClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewGalleryApplicationVersionsServerTransport creates a new instance of GalleryApplicationVersionsServerTransport with the provided implementation. 
+// The returned GalleryApplicationVersionsServerTransport instance is connected to an instance of armcompute.GalleryApplicationVersionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewGalleryApplicationVersionsServerTransport(srv *GalleryApplicationVersionsServer) *GalleryApplicationVersionsServerTransport { + return &GalleryApplicationVersionsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armcompute.GalleryApplicationVersionsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armcompute.GalleryApplicationVersionsClientDeleteResponse]](), + newListByGalleryApplicationPager: newTracker[azfake.PagerResponder[armcompute.GalleryApplicationVersionsClientListByGalleryApplicationResponse]](), + beginUpdate: newTracker[azfake.PollerResponder[armcompute.GalleryApplicationVersionsClientUpdateResponse]](), + } +} + +// GalleryApplicationVersionsServerTransport connects instances of armcompute.GalleryApplicationVersionsClient to instances of GalleryApplicationVersionsServer. +// Don't use this type directly, use NewGalleryApplicationVersionsServerTransport instead. +type GalleryApplicationVersionsServerTransport struct { + srv *GalleryApplicationVersionsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armcompute.GalleryApplicationVersionsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armcompute.GalleryApplicationVersionsClientDeleteResponse]] + newListByGalleryApplicationPager *tracker[azfake.PagerResponder[armcompute.GalleryApplicationVersionsClientListByGalleryApplicationResponse]] + beginUpdate *tracker[azfake.PollerResponder[armcompute.GalleryApplicationVersionsClientUpdateResponse]] +} + +// Do implements the policy.Transporter interface for GalleryApplicationVersionsServerTransport. 
+func (g *GalleryApplicationVersionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "GalleryApplicationVersionsClient.BeginCreateOrUpdate": + resp, err = g.dispatchBeginCreateOrUpdate(req) + case "GalleryApplicationVersionsClient.BeginDelete": + resp, err = g.dispatchBeginDelete(req) + case "GalleryApplicationVersionsClient.Get": + resp, err = g.dispatchGet(req) + case "GalleryApplicationVersionsClient.NewListByGalleryApplicationPager": + resp, err = g.dispatchNewListByGalleryApplicationPager(req) + case "GalleryApplicationVersionsClient.BeginUpdate": + resp, err = g.dispatchBeginUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (g *GalleryApplicationVersionsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if g.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := g.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/applications/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/versions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.GalleryApplicationVersion](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + galleryApplicationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryApplicationName")]) + if err != nil { + return nil, err + } + galleryApplicationVersionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryApplicationVersionName")]) + if err != nil { + return nil, err + } + respr, errRespr := g.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, galleryNameParam, galleryApplicationNameParam, galleryApplicationVersionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + g.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated, http.StatusAccepted}, resp.StatusCode) { + g.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + g.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (g *GalleryApplicationVersionsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if g.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := g.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/applications/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/versions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + galleryApplicationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryApplicationName")]) + if err != nil { + return nil, err + } + galleryApplicationVersionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryApplicationVersionName")]) + if err != nil { + return nil, err + } + respr, errRespr := g.srv.BeginDelete(req.Context(), resourceGroupNameParam, galleryNameParam, galleryApplicationNameParam, galleryApplicationVersionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + g.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + g.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + g.beginDelete.remove(req) + } + + return resp, nil +} + +func (g *GalleryApplicationVersionsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if g.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/applications/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/versions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + galleryApplicationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryApplicationName")]) + if err != nil { + return nil, err + } + galleryApplicationVersionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryApplicationVersionName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(armcompute.ReplicationStatusTypes(expandUnescaped)) + var options *armcompute.GalleryApplicationVersionsClientGetOptions + if expandParam != nil { + options = &armcompute.GalleryApplicationVersionsClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := g.srv.Get(req.Context(), resourceGroupNameParam, galleryNameParam, galleryApplicationNameParam, galleryApplicationVersionNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).GalleryApplicationVersion, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (g *GalleryApplicationVersionsServerTransport) dispatchNewListByGalleryApplicationPager(req *http.Request) (*http.Response, error) { + if g.srv.NewListByGalleryApplicationPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByGalleryApplicationPager not implemented")} + } + newListByGalleryApplicationPager := g.newListByGalleryApplicationPager.get(req) + if newListByGalleryApplicationPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/applications/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/versions` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + galleryApplicationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryApplicationName")]) + if err != nil { + return nil, err + } + resp := g.srv.NewListByGalleryApplicationPager(resourceGroupNameParam, galleryNameParam, galleryApplicationNameParam, nil) + newListByGalleryApplicationPager = &resp + g.newListByGalleryApplicationPager.add(req, newListByGalleryApplicationPager) + server.PagerResponderInjectNextLinks(newListByGalleryApplicationPager, req, func(page *armcompute.GalleryApplicationVersionsClientListByGalleryApplicationResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByGalleryApplicationPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + g.newListByGalleryApplicationPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByGalleryApplicationPager) { + g.newListByGalleryApplicationPager.remove(req) + } + return resp, nil +} + +func (g *GalleryApplicationVersionsServerTransport) dispatchBeginUpdate(req *http.Request) (*http.Response, error) { + if g.srv.BeginUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdate not implemented")} + } + beginUpdate := g.beginUpdate.get(req) + if beginUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/applications/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/versions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.GalleryApplicationVersionUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + galleryApplicationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryApplicationName")]) + if err != nil { + return nil, err + } + galleryApplicationVersionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryApplicationVersionName")]) + if err != nil { + return nil, err + } + respr, errRespr := g.srv.BeginUpdate(req.Context(), resourceGroupNameParam, galleryNameParam, galleryApplicationNameParam, galleryApplicationVersionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdate = &respr + g.beginUpdate.add(req, beginUpdate) + } + + resp, err := server.PollerResponderNext(beginUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK}, resp.StatusCode) { + g.beginUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdate) { + g.beginUpdate.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/galleryimages_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/galleryimages_server.go new file mode 100644 index 00000000000..2a4b61ad7cc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/galleryimages_server.go @@ -0,0 +1,332 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
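A similar hedged sketch for the PollerResponder-based fakes in galleryapplicationversions_server.go above: responses queued on the responder are replayed poll by poll through PollerResponderNext, non-terminal ones first and then the terminal one. This reuses the imports and wiring from the earlier sketch; names are placeholders.

func TestGalleryApplicationVersionsBeginDelete(t *testing.T) {
	srv := fake.GalleryApplicationVersionsServer{
		BeginDelete: func(ctx context.Context, resourceGroupName, galleryName, galleryApplicationName, galleryApplicationVersionName string, options *armcompute.GalleryApplicationVersionsClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.GalleryApplicationVersionsClientDeleteResponse], errResp azfake.ErrorResponder) {
			// One intermediate 202 poll, then a terminal 200; both are among
			// the status codes dispatchBeginDelete accepts.
			resp.AddNonTerminalResponse(http.StatusAccepted, nil)
			resp.SetTerminalResponse(http.StatusOK, armcompute.GalleryApplicationVersionsClientDeleteResponse{}, nil)
			return
		},
	}
	client, err := armcompute.NewGalleryApplicationVersionsClient("subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transporter: fake.NewGalleryApplicationVersionsServerTransport(&srv)},
	})
	if err != nil {
		t.Fatal(err)
	}
	poller, err := client.BeginDelete(context.Background(), "myResourceGroup", "myGallery", "myApp", "1.0.0", nil)
	if err != nil {
		t.Fatal(err)
	}
	if _, err := poller.PollUntilDone(context.Background(), nil); err != nil {
		t.Fatal(err)
	}
}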
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// GalleryImagesServer is a fake server for instances of the armcompute.GalleryImagesClient type. +type GalleryImagesServer struct { + // BeginCreateOrUpdate is the fake for method GalleryImagesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated, http.StatusAccepted + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage armcompute.GalleryImage, options *armcompute.GalleryImagesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armcompute.GalleryImagesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method GalleryImagesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, options *armcompute.GalleryImagesClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.GalleryImagesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method GalleryImagesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, options *armcompute.GalleryImagesClientGetOptions) (resp azfake.Responder[armcompute.GalleryImagesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListByGalleryPager is the fake for method GalleryImagesClient.NewListByGalleryPager + // HTTP status codes to indicate success: http.StatusOK + NewListByGalleryPager func(resourceGroupName string, galleryName string, options *armcompute.GalleryImagesClientListByGalleryOptions) (resp azfake.PagerResponder[armcompute.GalleryImagesClientListByGalleryResponse]) + + // BeginUpdate is the fake for method GalleryImagesClient.BeginUpdate + // HTTP status codes to indicate success: http.StatusOK + BeginUpdate func(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage armcompute.GalleryImageUpdate, options *armcompute.GalleryImagesClientBeginUpdateOptions) (resp azfake.PollerResponder[armcompute.GalleryImagesClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewGalleryImagesServerTransport creates a new instance of GalleryImagesServerTransport with the provided implementation. +// The returned GalleryImagesServerTransport instance is connected to an instance of armcompute.GalleryImagesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
+func NewGalleryImagesServerTransport(srv *GalleryImagesServer) *GalleryImagesServerTransport { + return &GalleryImagesServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armcompute.GalleryImagesClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armcompute.GalleryImagesClientDeleteResponse]](), + newListByGalleryPager: newTracker[azfake.PagerResponder[armcompute.GalleryImagesClientListByGalleryResponse]](), + beginUpdate: newTracker[azfake.PollerResponder[armcompute.GalleryImagesClientUpdateResponse]](), + } +} + +// GalleryImagesServerTransport connects instances of armcompute.GalleryImagesClient to instances of GalleryImagesServer. +// Don't use this type directly, use NewGalleryImagesServerTransport instead. +type GalleryImagesServerTransport struct { + srv *GalleryImagesServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armcompute.GalleryImagesClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armcompute.GalleryImagesClientDeleteResponse]] + newListByGalleryPager *tracker[azfake.PagerResponder[armcompute.GalleryImagesClientListByGalleryResponse]] + beginUpdate *tracker[azfake.PollerResponder[armcompute.GalleryImagesClientUpdateResponse]] +} + +// Do implements the policy.Transporter interface for GalleryImagesServerTransport. +func (g *GalleryImagesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "GalleryImagesClient.BeginCreateOrUpdate": + resp, err = g.dispatchBeginCreateOrUpdate(req) + case "GalleryImagesClient.BeginDelete": + resp, err = g.dispatchBeginDelete(req) + case "GalleryImagesClient.Get": + resp, err = g.dispatchGet(req) + case "GalleryImagesClient.NewListByGalleryPager": + resp, err = g.dispatchNewListByGalleryPager(req) + case "GalleryImagesClient.BeginUpdate": + resp, err = g.dispatchBeginUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (g *GalleryImagesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if g.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := g.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/images/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.GalleryImage](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + galleryImageNameParam, err := 
url.PathUnescape(matches[regex.SubexpIndex("galleryImageName")]) + if err != nil { + return nil, err + } + respr, errRespr := g.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, galleryNameParam, galleryImageNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + g.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated, http.StatusAccepted}, resp.StatusCode) { + g.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + g.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (g *GalleryImagesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if g.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := g.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/images/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + galleryImageNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryImageName")]) + if err != nil { + return nil, err + } + respr, errRespr := g.srv.BeginDelete(req.Context(), resourceGroupNameParam, galleryNameParam, galleryImageNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + g.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + g.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + g.beginDelete.remove(req) + } + + return resp, nil +} + +func (g *GalleryImagesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if g.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/images/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + galleryImageNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryImageName")]) + if err != nil { + return nil, err + } + respr, errRespr := g.srv.Get(req.Context(), resourceGroupNameParam, galleryNameParam, galleryImageNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).GalleryImage, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (g *GalleryImagesServerTransport) dispatchNewListByGalleryPager(req *http.Request) (*http.Response, error) { + if g.srv.NewListByGalleryPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByGalleryPager not implemented")} + } + newListByGalleryPager := g.newListByGalleryPager.get(req) + if newListByGalleryPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/images` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + resp := g.srv.NewListByGalleryPager(resourceGroupNameParam, galleryNameParam, nil) + newListByGalleryPager = &resp + g.newListByGalleryPager.add(req, newListByGalleryPager) + server.PagerResponderInjectNextLinks(newListByGalleryPager, req, func(page *armcompute.GalleryImagesClientListByGalleryResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByGalleryPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + g.newListByGalleryPager.remove(req) + return nil, 
&nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByGalleryPager) { + g.newListByGalleryPager.remove(req) + } + return resp, nil +} + +func (g *GalleryImagesServerTransport) dispatchBeginUpdate(req *http.Request) (*http.Response, error) { + if g.srv.BeginUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdate not implemented")} + } + beginUpdate := g.beginUpdate.get(req) + if beginUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/images/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.GalleryImageUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + galleryImageNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryImageName")]) + if err != nil { + return nil, err + } + respr, errRespr := g.srv.BeginUpdate(req.Context(), resourceGroupNameParam, galleryNameParam, galleryImageNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdate = &respr + g.beginUpdate.add(req, beginUpdate) + } + + resp, err := server.PollerResponderNext(beginUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK}, resp.StatusCode) { + g.beginUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdate) { + g.beginUpdate.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/galleryimageversions_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/galleryimageversions_server.go new file mode 100644 index 00000000000..1eef7f42c80 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/galleryimageversions_server.go @@ -0,0 +1,364 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
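One more hedged sketch, for the PagerResponder-based list fakes in galleryimages_server.go above: each AddPage call becomes one page, and the dispatcher injects nextLink values between pages. Again this reuses the earlier imports and wiring; the image names are invented test data.

func TestGalleryImagesListByGallery(t *testing.T) {
	srv := fake.GalleryImagesServer{
		NewListByGalleryPager: func(resourceGroupName, galleryName string, options *armcompute.GalleryImagesClientListByGalleryOptions) (resp azfake.PagerResponder[armcompute.GalleryImagesClientListByGalleryResponse]) {
			// Two pages; PagerResponderInjectNextLinks chains them together.
			resp.AddPage(http.StatusOK, armcompute.GalleryImagesClientListByGalleryResponse{
				GalleryImageList: armcompute.GalleryImageList{Value: []*armcompute.GalleryImage{{Name: to.Ptr("image-1")}}},
			}, nil)
			resp.AddPage(http.StatusOK, armcompute.GalleryImagesClientListByGalleryResponse{
				GalleryImageList: armcompute.GalleryImageList{Value: []*armcompute.GalleryImage{{Name: to.Ptr("image-2")}}},
			}, nil)
			return
		},
	}
	client, err := armcompute.NewGalleryImagesClient("subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transporter: fake.NewGalleryImagesServerTransport(&srv)},
	})
	if err != nil {
		t.Fatal(err)
	}
	var names []string
	pager := client.NewListByGalleryPager("myResourceGroup", "myGallery", nil)
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		if err != nil {
			t.Fatal(err)
		}
		for _, img := range page.Value {
			names = append(names, *img.Name)
		}
	}
	if len(names) != 2 {
		t.Fatalf("expected 2 images, got %d", len(names))
	}
}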
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// GalleryImageVersionsServer is a fake server for instances of the armcompute.GalleryImageVersionsClient type. +type GalleryImageVersionsServer struct { + // BeginCreateOrUpdate is the fake for method GalleryImageVersionsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated, http.StatusAccepted + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion armcompute.GalleryImageVersion, options *armcompute.GalleryImageVersionsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armcompute.GalleryImageVersionsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method GalleryImageVersionsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, options *armcompute.GalleryImageVersionsClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.GalleryImageVersionsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method GalleryImageVersionsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, options *armcompute.GalleryImageVersionsClientGetOptions) (resp azfake.Responder[armcompute.GalleryImageVersionsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListByGalleryImagePager is the fake for method GalleryImageVersionsClient.NewListByGalleryImagePager + // HTTP status codes to indicate success: http.StatusOK + NewListByGalleryImagePager func(resourceGroupName string, galleryName string, galleryImageName string, options *armcompute.GalleryImageVersionsClientListByGalleryImageOptions) (resp azfake.PagerResponder[armcompute.GalleryImageVersionsClientListByGalleryImageResponse]) + + // BeginUpdate is the fake for method GalleryImageVersionsClient.BeginUpdate + // HTTP status codes to indicate success: http.StatusOK + BeginUpdate func(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion armcompute.GalleryImageVersionUpdate, options *armcompute.GalleryImageVersionsClientBeginUpdateOptions) (resp azfake.PollerResponder[armcompute.GalleryImageVersionsClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewGalleryImageVersionsServerTransport creates a new instance of GalleryImageVersionsServerTransport with the provided implementation. +// The returned GalleryImageVersionsServerTransport instance is connected to an instance of armcompute.GalleryImageVersionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
+func NewGalleryImageVersionsServerTransport(srv *GalleryImageVersionsServer) *GalleryImageVersionsServerTransport { + return &GalleryImageVersionsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armcompute.GalleryImageVersionsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armcompute.GalleryImageVersionsClientDeleteResponse]](), + newListByGalleryImagePager: newTracker[azfake.PagerResponder[armcompute.GalleryImageVersionsClientListByGalleryImageResponse]](), + beginUpdate: newTracker[azfake.PollerResponder[armcompute.GalleryImageVersionsClientUpdateResponse]](), + } +} + +// GalleryImageVersionsServerTransport connects instances of armcompute.GalleryImageVersionsClient to instances of GalleryImageVersionsServer. +// Don't use this type directly, use NewGalleryImageVersionsServerTransport instead. +type GalleryImageVersionsServerTransport struct { + srv *GalleryImageVersionsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armcompute.GalleryImageVersionsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armcompute.GalleryImageVersionsClientDeleteResponse]] + newListByGalleryImagePager *tracker[azfake.PagerResponder[armcompute.GalleryImageVersionsClientListByGalleryImageResponse]] + beginUpdate *tracker[azfake.PollerResponder[armcompute.GalleryImageVersionsClientUpdateResponse]] +} + +// Do implements the policy.Transporter interface for GalleryImageVersionsServerTransport. +func (g *GalleryImageVersionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "GalleryImageVersionsClient.BeginCreateOrUpdate": + resp, err = g.dispatchBeginCreateOrUpdate(req) + case "GalleryImageVersionsClient.BeginDelete": + resp, err = g.dispatchBeginDelete(req) + case "GalleryImageVersionsClient.Get": + resp, err = g.dispatchGet(req) + case "GalleryImageVersionsClient.NewListByGalleryImagePager": + resp, err = g.dispatchNewListByGalleryImagePager(req) + case "GalleryImageVersionsClient.BeginUpdate": + resp, err = g.dispatchBeginUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (g *GalleryImageVersionsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if g.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := g.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/images/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/versions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.GalleryImageVersion](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) 
+ if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + galleryImageNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryImageName")]) + if err != nil { + return nil, err + } + galleryImageVersionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryImageVersionName")]) + if err != nil { + return nil, err + } + respr, errRespr := g.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, galleryNameParam, galleryImageNameParam, galleryImageVersionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + g.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated, http.StatusAccepted}, resp.StatusCode) { + g.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + g.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (g *GalleryImageVersionsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if g.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := g.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/images/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/versions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + galleryImageNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryImageName")]) + if err != nil { + return nil, err + } + galleryImageVersionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryImageVersionName")]) + if err != nil { + return nil, err + } + respr, errRespr := g.srv.BeginDelete(req.Context(), resourceGroupNameParam, galleryNameParam, galleryImageNameParam, galleryImageVersionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + g.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + g.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + g.beginDelete.remove(req) + } + + return resp, nil +} + +func (g *GalleryImageVersionsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if g.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/images/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/versions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + galleryImageNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryImageName")]) + if err != nil { + return nil, err + } + galleryImageVersionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryImageVersionName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(armcompute.ReplicationStatusTypes(expandUnescaped)) + var options *armcompute.GalleryImageVersionsClientGetOptions + if expandParam != nil { + options = &armcompute.GalleryImageVersionsClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := g.srv.Get(req.Context(), resourceGroupNameParam, galleryNameParam, galleryImageNameParam, galleryImageVersionNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).GalleryImageVersion, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (g *GalleryImageVersionsServerTransport) dispatchNewListByGalleryImagePager(req *http.Request) (*http.Response, error) { + if g.srv.NewListByGalleryImagePager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByGalleryImagePager not implemented")} + } + newListByGalleryImagePager := g.newListByGalleryImagePager.get(req) + if newListByGalleryImagePager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/images/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/versions` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + galleryImageNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryImageName")]) + if err != nil { + return nil, err + } + resp := g.srv.NewListByGalleryImagePager(resourceGroupNameParam, galleryNameParam, galleryImageNameParam, nil) + newListByGalleryImagePager = &resp + g.newListByGalleryImagePager.add(req, newListByGalleryImagePager) + server.PagerResponderInjectNextLinks(newListByGalleryImagePager, req, func(page *armcompute.GalleryImageVersionsClientListByGalleryImageResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByGalleryImagePager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + g.newListByGalleryImagePager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByGalleryImagePager) { + g.newListByGalleryImagePager.remove(req) + } + return resp, nil +} + +func (g *GalleryImageVersionsServerTransport) dispatchBeginUpdate(req *http.Request) (*http.Response, error) { + if g.srv.BeginUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdate not implemented")} + } + beginUpdate := g.beginUpdate.get(req) + if beginUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/images/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/versions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.GalleryImageVersionUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + galleryImageNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryImageName")]) + if err != nil { + return nil, err + } + galleryImageVersionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryImageVersionName")]) + if err != nil { + return nil, err + } + respr, errRespr := g.srv.BeginUpdate(req.Context(), resourceGroupNameParam, galleryNameParam, galleryImageNameParam, galleryImageVersionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdate = &respr + g.beginUpdate.add(req, beginUpdate) + } + + resp, err := server.PollerResponderNext(beginUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK}, resp.StatusCode) { + g.beginUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdate) { + g.beginUpdate.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/gallerysharingprofile_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/gallerysharingprofile_server.go new file mode 100644 index 00000000000..eb4caf2d9e9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/gallerysharingprofile_server.go @@ -0,0 +1,119 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
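The doc comments above only state that a fake server is connected to the real client through the azcore.ClientOptions.Transporter field. A minimal sketch of that wiring for the gallery image versions fake might look like the test below; the package name, subscription ID, and resource names are hypothetical placeholders, and only the Get fake is implemented (any other method would surface the dispatcher's "not implemented" error).

package azuretest

import (
	"context"
	"net/http"
	"testing"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake"
)

func TestGalleryImageVersionGetFake(t *testing.T) {
	// Implement only the operation the test needs; anything else returns
	// the dispatcher's "not implemented" nonRetriableError.
	srv := fake.GalleryImageVersionsServer{
		Get: func(ctx context.Context, resourceGroupName, galleryName, galleryImageName, galleryImageVersionName string,
			options *armcompute.GalleryImageVersionsClientGetOptions) (resp azfake.Responder[armcompute.GalleryImageVersionsClientGetResponse], errResp azfake.ErrorResponder) {
			resp.SetResponse(http.StatusOK, armcompute.GalleryImageVersionsClientGetResponse{
				GalleryImageVersion: armcompute.GalleryImageVersion{Name: to.Ptr(galleryImageVersionName)},
			}, nil)
			return
		},
	}

	// Route the real client through the fake transport instead of ARM.
	client, err := armcompute.NewGalleryImageVersionsClient("sub-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transporter: fake.NewGalleryImageVersionsServerTransport(&srv)},
	})
	if err != nil {
		t.Fatal(err)
	}
	res, err := client.Get(context.Background(), "rg", "gallery", "image", "1.0.0", nil)
	if err != nil || *res.Name != "1.0.0" {
		t.Fatalf("unexpected result: %v %v", res, err)
	}
}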
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// GallerySharingProfileServer is a fake server for instances of the armcompute.GallerySharingProfileClient type. +type GallerySharingProfileServer struct { + // BeginUpdate is the fake for method GallerySharingProfileClient.BeginUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginUpdate func(ctx context.Context, resourceGroupName string, galleryName string, sharingUpdate armcompute.SharingUpdate, options *armcompute.GallerySharingProfileClientBeginUpdateOptions) (resp azfake.PollerResponder[armcompute.GallerySharingProfileClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewGallerySharingProfileServerTransport creates a new instance of GallerySharingProfileServerTransport with the provided implementation. +// The returned GallerySharingProfileServerTransport instance is connected to an instance of armcompute.GallerySharingProfileClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewGallerySharingProfileServerTransport(srv *GallerySharingProfileServer) *GallerySharingProfileServerTransport { + return &GallerySharingProfileServerTransport{ + srv: srv, + beginUpdate: newTracker[azfake.PollerResponder[armcompute.GallerySharingProfileClientUpdateResponse]](), + } +} + +// GallerySharingProfileServerTransport connects instances of armcompute.GallerySharingProfileClient to instances of GallerySharingProfileServer. +// Don't use this type directly, use NewGallerySharingProfileServerTransport instead. +type GallerySharingProfileServerTransport struct { + srv *GallerySharingProfileServer + beginUpdate *tracker[azfake.PollerResponder[armcompute.GallerySharingProfileClientUpdateResponse]] +} + +// Do implements the policy.Transporter interface for GallerySharingProfileServerTransport. 
+func (g *GallerySharingProfileServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "GallerySharingProfileClient.BeginUpdate": + resp, err = g.dispatchBeginUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (g *GallerySharingProfileServerTransport) dispatchBeginUpdate(req *http.Request) (*http.Response, error) { + if g.srv.BeginUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdate not implemented")} + } + beginUpdate := g.beginUpdate.get(req) + if beginUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/galleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/share` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.SharingUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + galleryNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryName")]) + if err != nil { + return nil, err + } + respr, errRespr := g.srv.BeginUpdate(req.Context(), resourceGroupNameParam, galleryNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdate = &respr + g.beginUpdate.add(req, beginUpdate) + } + + resp, err := server.PollerResponderNext(beginUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + g.beginUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdate) { + g.beginUpdate.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/images_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/images_server.go new file mode 100644 index 00000000000..2f542b14ced --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/images_server.go @@ -0,0 +1,365 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
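For long-running operations such as GallerySharingProfileClient.BeginUpdate, the transport above keeps the PollerResponder in its tracker and replays one queued response per poll until server.PollerResponderMore reports that the terminal response has been consumed. A test can drive that behaviour roughly as follows, reusing the imports and fake-credential wiring from the earlier sketch; the two intermediate 200 responses and the empty SharingUpdate are illustrative assumptions.

srv := fake.GallerySharingProfileServer{
	BeginUpdate: func(ctx context.Context, resourceGroupName, galleryName string, sharingUpdate armcompute.SharingUpdate,
		options *armcompute.GallerySharingProfileClientBeginUpdateOptions) (resp azfake.PollerResponder[armcompute.GallerySharingProfileClientUpdateResponse], errResp azfake.ErrorResponder) {
		// Two non-terminal polls, then the terminal payload.
		resp.AddNonTerminalResponse(http.StatusOK, nil)
		resp.AddNonTerminalResponse(http.StatusOK, nil)
		resp.SetTerminalResponse(http.StatusOK, armcompute.GallerySharingProfileClientUpdateResponse{}, nil)
		return
	},
}
client, err := armcompute.NewGallerySharingProfileClient("sub-id", &azfake.TokenCredential{}, &arm.ClientOptions{
	ClientOptions: azcore.ClientOptions{Transporter: fake.NewGallerySharingProfileServerTransport(&srv)},
})
if err != nil {
	// handle error
}
poller, err := client.BeginUpdate(context.Background(), "rg", "gallery", armcompute.SharingUpdate{}, nil)
if err != nil {
	// handle error
}
// Each poll consumes one queued response from the tracker until the terminal one is returned.
if _, err = poller.PollUntilDone(context.Background(), nil); err != nil {
	// handle error
}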
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// ImagesServer is a fake server for instances of the armcompute.ImagesClient type. +type ImagesServer struct { + // BeginCreateOrUpdate is the fake for method ImagesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, imageName string, parameters armcompute.Image, options *armcompute.ImagesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armcompute.ImagesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method ImagesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, imageName string, options *armcompute.ImagesClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.ImagesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method ImagesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, imageName string, options *armcompute.ImagesClientGetOptions) (resp azfake.Responder[armcompute.ImagesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method ImagesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armcompute.ImagesClientListOptions) (resp azfake.PagerResponder[armcompute.ImagesClientListResponse]) + + // NewListByResourceGroupPager is the fake for method ImagesClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armcompute.ImagesClientListByResourceGroupOptions) (resp azfake.PagerResponder[armcompute.ImagesClientListByResourceGroupResponse]) + + // BeginUpdate is the fake for method ImagesClient.BeginUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginUpdate func(ctx context.Context, resourceGroupName string, imageName string, parameters armcompute.ImageUpdate, options *armcompute.ImagesClientBeginUpdateOptions) (resp azfake.PollerResponder[armcompute.ImagesClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewImagesServerTransport creates a new instance of ImagesServerTransport with the provided implementation. +// The returned ImagesServerTransport instance is connected to an instance of armcompute.ImagesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
+func NewImagesServerTransport(srv *ImagesServer) *ImagesServerTransport { + return &ImagesServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armcompute.ImagesClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armcompute.ImagesClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armcompute.ImagesClientListResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armcompute.ImagesClientListByResourceGroupResponse]](), + beginUpdate: newTracker[azfake.PollerResponder[armcompute.ImagesClientUpdateResponse]](), + } +} + +// ImagesServerTransport connects instances of armcompute.ImagesClient to instances of ImagesServer. +// Don't use this type directly, use NewImagesServerTransport instead. +type ImagesServerTransport struct { + srv *ImagesServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armcompute.ImagesClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armcompute.ImagesClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armcompute.ImagesClientListResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armcompute.ImagesClientListByResourceGroupResponse]] + beginUpdate *tracker[azfake.PollerResponder[armcompute.ImagesClientUpdateResponse]] +} + +// Do implements the policy.Transporter interface for ImagesServerTransport. +func (i *ImagesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ImagesClient.BeginCreateOrUpdate": + resp, err = i.dispatchBeginCreateOrUpdate(req) + case "ImagesClient.BeginDelete": + resp, err = i.dispatchBeginDelete(req) + case "ImagesClient.Get": + resp, err = i.dispatchGet(req) + case "ImagesClient.NewListPager": + resp, err = i.dispatchNewListPager(req) + case "ImagesClient.NewListByResourceGroupPager": + resp, err = i.dispatchNewListByResourceGroupPager(req) + case "ImagesClient.BeginUpdate": + resp, err = i.dispatchBeginUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (i *ImagesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if i.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := i.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/images/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.Image](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + imageNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("imageName")]) + if err != nil { + return nil, err + } + respr, errRespr := 
i.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, imageNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + i.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + i.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + i.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (i *ImagesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if i.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := i.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/images/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + imageNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("imageName")]) + if err != nil { + return nil, err + } + respr, errRespr := i.srv.BeginDelete(req.Context(), resourceGroupNameParam, imageNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + i.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + i.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + i.beginDelete.remove(req) + } + + return resp, nil +} + +func (i *ImagesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if i.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/images/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + imageNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("imageName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armcompute.ImagesClientGetOptions + if expandParam != nil { + options = &armcompute.ImagesClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := i.srv.Get(req.Context(), resourceGroupNameParam, imageNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Image, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (i *ImagesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if i.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := i.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/images` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := i.srv.NewListPager(nil) + newListPager = &resp + i.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armcompute.ImagesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + i.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + i.newListPager.remove(req) + } + return resp, nil +} + +func (i *ImagesServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if i.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := i.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/images` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := i.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + i.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armcompute.ImagesClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + i.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + i.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (i *ImagesServerTransport) dispatchBeginUpdate(req *http.Request) (*http.Response, error) { + if i.srv.BeginUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdate not implemented")} + } + beginUpdate := i.beginUpdate.get(req) + if beginUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/images/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.ImageUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + imageNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("imageName")]) + if err != nil { + return nil, err + } + respr, errRespr := i.srv.BeginUpdate(req.Context(), resourceGroupNameParam, imageNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdate = &respr + i.beginUpdate.add(req, beginUpdate) + } + + resp, err := server.PollerResponderNext(beginUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + i.beginUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdate) { + i.beginUpdate.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/internal.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/internal.go new file mode 100644 index 00000000000..96339f26b37 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/internal.go @@ -0,0 +1,99 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "net/http" + "reflect" + "sync" +) + +type nonRetriableError struct { + error +} + +func (nonRetriableError) NonRetriable() { + // marker method +} + +func contains[T comparable](s []T, v T) bool { + for _, vv := range s { + if vv == v { + return true + } + } + return false +} + +func getHeaderValue(h http.Header, k string) string { + v := h[k] + if len(v) == 0 { + return "" + } + return v[0] +} + +func getOptional[T any](v T) *T { + if reflect.ValueOf(v).IsZero() { + return nil + } + return &v +} + +func parseOptional[T any](v string, parse func(v string) (T, error)) (*T, error) { + if v == "" { + return nil, nil + } + t, err := parse(v) + if err != nil { + return nil, err + } + return &t, err +} + +func parseWithCast[T any](v string, parse func(v string) (T, error)) (T, error) { + t, err := parse(v) + if err != nil { + return *new(T), err + } + return t, err +} + +func newTracker[T any]() *tracker[T] { + return &tracker[T]{ + items: map[string]*T{}, + } +} + +type tracker[T any] struct { + items map[string]*T + mu sync.Mutex +} + +func (p *tracker[T]) get(req *http.Request) *T { + p.mu.Lock() + defer p.mu.Unlock() + if item, ok := p.items[server.SanitizePagerPollerPath(req.URL.Path)]; ok { + return item + } + return nil +} + +func (p *tracker[T]) add(req *http.Request, item *T) { + p.mu.Lock() + defer p.mu.Unlock() + p.items[server.SanitizePagerPollerPath(req.URL.Path)] = item +} + +func (p *tracker[T]) remove(req *http.Request) { + p.mu.Lock() + defer p.mu.Unlock() + delete(p.items, server.SanitizePagerPollerPath(req.URL.Path)) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/loganalytics_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/loganalytics_server.go new file mode 100644 index 00000000000..af741bb5677 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/loganalytics_server.go @@ -0,0 +1,167 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
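The list fakes follow the same pattern with azfake.PagerResponder: each queued page is returned on one request, and PagerResponderInjectNextLinks (seen in the dispatchers above) rewrites NextLink so the client keeps paging against the fake transport. A rough sketch for ImagesClient.NewListPager, reusing the wiring from the first example and with hypothetical image names:

srv := fake.ImagesServer{
	NewListPager: func(options *armcompute.ImagesClientListOptions) (resp azfake.PagerResponder[armcompute.ImagesClientListResponse]) {
		// Two single-item pages; the transport injects the nextLink between them.
		resp.AddPage(http.StatusOK, armcompute.ImagesClientListResponse{
			ImageListResult: armcompute.ImageListResult{Value: []*armcompute.Image{{Name: to.Ptr("img-1")}}},
		}, nil)
		resp.AddPage(http.StatusOK, armcompute.ImagesClientListResponse{
			ImageListResult: armcompute.ImageListResult{Value: []*armcompute.Image{{Name: to.Ptr("img-2")}}},
		}, nil)
		return
	},
}
client, err := armcompute.NewImagesClient("sub-id", &azfake.TokenCredential{}, &arm.ClientOptions{
	ClientOptions: azcore.ClientOptions{Transporter: fake.NewImagesServerTransport(&srv)},
})
if err != nil {
	// handle error
}
pager := client.NewListPager(nil)
for pager.More() {
	page, err := pager.NextPage(context.Background())
	if err != nil {
		// handle error
	}
	for _, img := range page.Value {
		_ = img // each fake page yields one image here
	}
}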
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// LogAnalyticsServer is a fake server for instances of the armcompute.LogAnalyticsClient type. +type LogAnalyticsServer struct { + // BeginExportRequestRateByInterval is the fake for method LogAnalyticsClient.BeginExportRequestRateByInterval + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginExportRequestRateByInterval func(ctx context.Context, location string, parameters armcompute.RequestRateByIntervalInput, options *armcompute.LogAnalyticsClientBeginExportRequestRateByIntervalOptions) (resp azfake.PollerResponder[armcompute.LogAnalyticsClientExportRequestRateByIntervalResponse], errResp azfake.ErrorResponder) + + // BeginExportThrottledRequests is the fake for method LogAnalyticsClient.BeginExportThrottledRequests + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginExportThrottledRequests func(ctx context.Context, location string, parameters armcompute.ThrottledRequestsInput, options *armcompute.LogAnalyticsClientBeginExportThrottledRequestsOptions) (resp azfake.PollerResponder[armcompute.LogAnalyticsClientExportThrottledRequestsResponse], errResp azfake.ErrorResponder) +} + +// NewLogAnalyticsServerTransport creates a new instance of LogAnalyticsServerTransport with the provided implementation. +// The returned LogAnalyticsServerTransport instance is connected to an instance of armcompute.LogAnalyticsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewLogAnalyticsServerTransport(srv *LogAnalyticsServer) *LogAnalyticsServerTransport { + return &LogAnalyticsServerTransport{ + srv: srv, + beginExportRequestRateByInterval: newTracker[azfake.PollerResponder[armcompute.LogAnalyticsClientExportRequestRateByIntervalResponse]](), + beginExportThrottledRequests: newTracker[azfake.PollerResponder[armcompute.LogAnalyticsClientExportThrottledRequestsResponse]](), + } +} + +// LogAnalyticsServerTransport connects instances of armcompute.LogAnalyticsClient to instances of LogAnalyticsServer. +// Don't use this type directly, use NewLogAnalyticsServerTransport instead. +type LogAnalyticsServerTransport struct { + srv *LogAnalyticsServer + beginExportRequestRateByInterval *tracker[azfake.PollerResponder[armcompute.LogAnalyticsClientExportRequestRateByIntervalResponse]] + beginExportThrottledRequests *tracker[azfake.PollerResponder[armcompute.LogAnalyticsClientExportThrottledRequestsResponse]] +} + +// Do implements the policy.Transporter interface for LogAnalyticsServerTransport. 
+func (l *LogAnalyticsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "LogAnalyticsClient.BeginExportRequestRateByInterval": + resp, err = l.dispatchBeginExportRequestRateByInterval(req) + case "LogAnalyticsClient.BeginExportThrottledRequests": + resp, err = l.dispatchBeginExportThrottledRequests(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (l *LogAnalyticsServerTransport) dispatchBeginExportRequestRateByInterval(req *http.Request) (*http.Response, error) { + if l.srv.BeginExportRequestRateByInterval == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginExportRequestRateByInterval not implemented")} + } + beginExportRequestRateByInterval := l.beginExportRequestRateByInterval.get(req) + if beginExportRequestRateByInterval == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/logAnalytics/apiAccess/getRequestRateByInterval` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.RequestRateByIntervalInput](req) + if err != nil { + return nil, err + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + respr, errRespr := l.srv.BeginExportRequestRateByInterval(req.Context(), locationParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginExportRequestRateByInterval = &respr + l.beginExportRequestRateByInterval.add(req, beginExportRequestRateByInterval) + } + + resp, err := server.PollerResponderNext(beginExportRequestRateByInterval, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + l.beginExportRequestRateByInterval.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginExportRequestRateByInterval) { + l.beginExportRequestRateByInterval.remove(req) + } + + return resp, nil +} + +func (l *LogAnalyticsServerTransport) dispatchBeginExportThrottledRequests(req *http.Request) (*http.Response, error) { + if l.srv.BeginExportThrottledRequests == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginExportThrottledRequests not implemented")} + } + beginExportThrottledRequests := l.beginExportThrottledRequests.get(req) + if beginExportThrottledRequests == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/logAnalytics/apiAccess/getThrottledRequests` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.ThrottledRequestsInput](req) + if err != nil { + return nil, err + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + respr, errRespr := l.srv.BeginExportThrottledRequests(req.Context(), locationParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginExportThrottledRequests = &respr + l.beginExportThrottledRequests.add(req, beginExportThrottledRequests) + } + + resp, err := server.PollerResponderNext(beginExportThrottledRequests, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + l.beginExportThrottledRequests.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginExportThrottledRequests) { + l.beginExportThrottledRequests.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/operations_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/operations_server.go new file mode 100644 index 00000000000..103898c5c4b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/operations_server.go @@ -0,0 +1,92 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" +) + +// OperationsServer is a fake server for instances of the armcompute.OperationsClient type. 
+type OperationsServer struct { + // NewListPager is the fake for method OperationsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armcompute.OperationsClientListOptions) (resp azfake.PagerResponder[armcompute.OperationsClientListResponse]) +} + +// NewOperationsServerTransport creates a new instance of OperationsServerTransport with the provided implementation. +// The returned OperationsServerTransport instance is connected to an instance of armcompute.OperationsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewOperationsServerTransport(srv *OperationsServer) *OperationsServerTransport { + return &OperationsServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armcompute.OperationsClientListResponse]](), + } +} + +// OperationsServerTransport connects instances of armcompute.OperationsClient to instances of OperationsServer. +// Don't use this type directly, use NewOperationsServerTransport instead. +type OperationsServerTransport struct { + srv *OperationsServer + newListPager *tracker[azfake.PagerResponder[armcompute.OperationsClientListResponse]] +} + +// Do implements the policy.Transporter interface for OperationsServerTransport. +func (o *OperationsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "OperationsClient.NewListPager": + resp, err = o.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (o *OperationsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if o.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := o.newListPager.get(req) + if newListPager == nil { + resp := o.srv.NewListPager(nil) + newListPager = &resp + o.newListPager.add(req, newListPager) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + o.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + o.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/proximityplacementgroups_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/proximityplacementgroups_server.go new file mode 100644 index 00000000000..bb782718760 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/proximityplacementgroups_server.go @@ -0,0 +1,326 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
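Every fake method also returns an azfake.ErrorResponder, which server.GetError converts into the HTTP error the client observes, so failure paths can be exercised without a real ARM endpoint. Assuming the wiring from the first sketch (plus the standard errors import), the LogAnalytics fake above could inject a failure like this:

srv := fake.LogAnalyticsServer{
	BeginExportRequestRateByInterval: func(ctx context.Context, location string, parameters armcompute.RequestRateByIntervalInput,
		options *armcompute.LogAnalyticsClientBeginExportRequestRateByIntervalOptions) (resp azfake.PollerResponder[armcompute.LogAnalyticsClientExportRequestRateByIntervalResponse], errResp azfake.ErrorResponder) {
		// server.GetError in the dispatcher turns this into the response error.
		errResp.SetError(errors.New("injected throttling failure"))
		return
	},
}
client, err := armcompute.NewLogAnalyticsClient("sub-id", &azfake.TokenCredential{}, &arm.ClientOptions{
	ClientOptions: azcore.ClientOptions{Transporter: fake.NewLogAnalyticsServerTransport(&srv)},
})
if err != nil {
	// handle error
}
if _, err := client.BeginExportRequestRateByInterval(context.Background(), "eastus", armcompute.RequestRateByIntervalInput{}, nil); err == nil {
	// the injected error is expected to surface here
}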
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// ProximityPlacementGroupsServer is a fake server for instances of the armcompute.ProximityPlacementGroupsClient type. +type ProximityPlacementGroupsServer struct { + // CreateOrUpdate is the fake for method ProximityPlacementGroupsClient.CreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + CreateOrUpdate func(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string, parameters armcompute.ProximityPlacementGroup, options *armcompute.ProximityPlacementGroupsClientCreateOrUpdateOptions) (resp azfake.Responder[armcompute.ProximityPlacementGroupsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // Delete is the fake for method ProximityPlacementGroupsClient.Delete + // HTTP status codes to indicate success: http.StatusOK + Delete func(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string, options *armcompute.ProximityPlacementGroupsClientDeleteOptions) (resp azfake.Responder[armcompute.ProximityPlacementGroupsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method ProximityPlacementGroupsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string, options *armcompute.ProximityPlacementGroupsClientGetOptions) (resp azfake.Responder[armcompute.ProximityPlacementGroupsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListByResourceGroupPager is the fake for method ProximityPlacementGroupsClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armcompute.ProximityPlacementGroupsClientListByResourceGroupOptions) (resp azfake.PagerResponder[armcompute.ProximityPlacementGroupsClientListByResourceGroupResponse]) + + // NewListBySubscriptionPager is the fake for method ProximityPlacementGroupsClient.NewListBySubscriptionPager + // HTTP status codes to indicate success: http.StatusOK + NewListBySubscriptionPager func(options *armcompute.ProximityPlacementGroupsClientListBySubscriptionOptions) (resp azfake.PagerResponder[armcompute.ProximityPlacementGroupsClientListBySubscriptionResponse]) + + // Update is the fake for method ProximityPlacementGroupsClient.Update + // HTTP status codes to indicate success: http.StatusOK + Update func(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string, parameters armcompute.ProximityPlacementGroupUpdate, options *armcompute.ProximityPlacementGroupsClientUpdateOptions) (resp azfake.Responder[armcompute.ProximityPlacementGroupsClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewProximityPlacementGroupsServerTransport creates a new instance of ProximityPlacementGroupsServerTransport with the provided implementation. +// The returned ProximityPlacementGroupsServerTransport instance is connected to an instance of armcompute.ProximityPlacementGroupsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
+func NewProximityPlacementGroupsServerTransport(srv *ProximityPlacementGroupsServer) *ProximityPlacementGroupsServerTransport { + return &ProximityPlacementGroupsServerTransport{ + srv: srv, + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armcompute.ProximityPlacementGroupsClientListByResourceGroupResponse]](), + newListBySubscriptionPager: newTracker[azfake.PagerResponder[armcompute.ProximityPlacementGroupsClientListBySubscriptionResponse]](), + } +} + +// ProximityPlacementGroupsServerTransport connects instances of armcompute.ProximityPlacementGroupsClient to instances of ProximityPlacementGroupsServer. +// Don't use this type directly, use NewProximityPlacementGroupsServerTransport instead. +type ProximityPlacementGroupsServerTransport struct { + srv *ProximityPlacementGroupsServer + newListByResourceGroupPager *tracker[azfake.PagerResponder[armcompute.ProximityPlacementGroupsClientListByResourceGroupResponse]] + newListBySubscriptionPager *tracker[azfake.PagerResponder[armcompute.ProximityPlacementGroupsClientListBySubscriptionResponse]] +} + +// Do implements the policy.Transporter interface for ProximityPlacementGroupsServerTransport. +func (p *ProximityPlacementGroupsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ProximityPlacementGroupsClient.CreateOrUpdate": + resp, err = p.dispatchCreateOrUpdate(req) + case "ProximityPlacementGroupsClient.Delete": + resp, err = p.dispatchDelete(req) + case "ProximityPlacementGroupsClient.Get": + resp, err = p.dispatchGet(req) + case "ProximityPlacementGroupsClient.NewListByResourceGroupPager": + resp, err = p.dispatchNewListByResourceGroupPager(req) + case "ProximityPlacementGroupsClient.NewListBySubscriptionPager": + resp, err = p.dispatchNewListBySubscriptionPager(req) + case "ProximityPlacementGroupsClient.Update": + resp, err = p.dispatchUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (p *ProximityPlacementGroupsServerTransport) dispatchCreateOrUpdate(req *http.Request) (*http.Response, error) { + if p.srv.CreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method CreateOrUpdate not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/proximityPlacementGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.ProximityPlacementGroup](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + proximityPlacementGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("proximityPlacementGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.CreateOrUpdate(req.Context(), resourceGroupNameParam, proximityPlacementGroupNameParam, body, nil) + if respErr := server.GetError(errRespr, req); 
respErr != nil {
+		return nil, respErr
+	}
+	respContent := server.GetResponseContent(respr)
+	if !contains([]int{http.StatusOK, http.StatusCreated}, respContent.HTTPStatus) {
+		return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", respContent.HTTPStatus)}
+	}
+	resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ProximityPlacementGroup, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+func (p *ProximityPlacementGroupsServerTransport) dispatchDelete(req *http.Request) (*http.Response, error) {
+	if p.srv.Delete == nil {
+		return nil, &nonRetriableError{errors.New("fake for method Delete not implemented")}
+	}
+	const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/proximityPlacementGroups/(?P<proximityPlacementGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)`
+	regex := regexp.MustCompile(regexStr)
+	matches := regex.FindStringSubmatch(req.URL.EscapedPath())
+	if matches == nil || len(matches) < 3 {
+		return nil, fmt.Errorf("failed to parse path %s", req.URL.Path)
+	}
+	resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")])
+	if err != nil {
+		return nil, err
+	}
+	proximityPlacementGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("proximityPlacementGroupName")])
+	if err != nil {
+		return nil, err
+	}
+	respr, errRespr := p.srv.Delete(req.Context(), resourceGroupNameParam, proximityPlacementGroupNameParam, nil)
+	if respErr := server.GetError(errRespr, req); respErr != nil {
+		return nil, respErr
+	}
+	respContent := server.GetResponseContent(respr)
+	if !contains([]int{http.StatusOK}, respContent.HTTPStatus) {
+		return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.NewResponse(respContent, req, nil) + if err != nil { + return nil, err + } + return resp, nil +} + +func (p *ProximityPlacementGroupsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if p.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/proximityPlacementGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + proximityPlacementGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("proximityPlacementGroupName")]) + if err != nil { + return nil, err + } + includeColocationStatusUnescaped, err := url.QueryUnescape(qp.Get("includeColocationStatus")) + if err != nil { + return nil, err + } + includeColocationStatusParam := getOptional(includeColocationStatusUnescaped) + var options *armcompute.ProximityPlacementGroupsClientGetOptions + if includeColocationStatusParam != nil { + options = &armcompute.ProximityPlacementGroupsClientGetOptions{ + IncludeColocationStatus: includeColocationStatusParam, + } + } + respr, errRespr := p.srv.Get(req.Context(), resourceGroupNameParam, proximityPlacementGroupNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ProximityPlacementGroup, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (p *ProximityPlacementGroupsServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if p.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := p.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/proximityPlacementGroups` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := p.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + p.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armcompute.ProximityPlacementGroupsClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + p.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + p.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (p *ProximityPlacementGroupsServerTransport) dispatchNewListBySubscriptionPager(req *http.Request) (*http.Response, error) { + if p.srv.NewListBySubscriptionPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListBySubscriptionPager not implemented")} + } + newListBySubscriptionPager := p.newListBySubscriptionPager.get(req) + if newListBySubscriptionPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/proximityPlacementGroups` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := p.srv.NewListBySubscriptionPager(nil) + newListBySubscriptionPager = &resp + p.newListBySubscriptionPager.add(req, newListBySubscriptionPager) + server.PagerResponderInjectNextLinks(newListBySubscriptionPager, req, func(page *armcompute.ProximityPlacementGroupsClientListBySubscriptionResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListBySubscriptionPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + p.newListBySubscriptionPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListBySubscriptionPager) { + p.newListBySubscriptionPager.remove(req) + } + return resp, nil +} + +func (p *ProximityPlacementGroupsServerTransport) dispatchUpdate(req *http.Request) (*http.Response, error) { + if p.srv.Update == nil { + return nil, &nonRetriableError{errors.New("fake for method Update not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/proximityPlacementGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.ProximityPlacementGroupUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + proximityPlacementGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("proximityPlacementGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.Update(req.Context(), resourceGroupNameParam, proximityPlacementGroupNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ProximityPlacementGroup, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/resourceskus_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/resourceskus_server.go new file mode 100644 index 00000000000..10f08bd7b40 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/resourceskus_server.go @@ -0,0 +1,122 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// ResourceSKUsServer is a fake server for instances of the armcompute.ResourceSKUsClient type. 
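The resourceskus_server.go fake follows the same shape as the other generated fakes in this package: a ResourceSKUsServer struct of function fields (declared immediately below) that the test fills in, and a ResourceSKUsServerTransport whose Do method dispatches on the API name and replays the responses the test queued. As a minimal usage sketch — assuming the standard azcore fake wiring, with a placeholder subscription ID and test name that are not part of this diff — a test can route a real ResourceSKUsClient through this fake like so:

```go
package fake_test

import (
	"context"
	"net/http"
	"testing"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake"
)

func TestListResourceSKUsAgainstFake(t *testing.T) {
	// Implement only the operations the test needs; calls to anything else fail
	// with the "fake for method ... not implemented" error seen in the dispatchers.
	srv := fake.ResourceSKUsServer{
		NewListPager: func(options *armcompute.ResourceSKUsClientListOptions) (resp azfake.PagerResponder[armcompute.ResourceSKUsClientListResponse]) {
			resp.AddPage(http.StatusOK, armcompute.ResourceSKUsClientListResponse{}, nil)
			return
		},
	}

	// Point a real client at the fake transport instead of ARM.
	client, err := armcompute.NewResourceSKUsClient("subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{
			Transport: fake.NewResourceSKUsServerTransport(&srv),
		},
	})
	if err != nil {
		t.Fatal(err)
	}

	pager := client.NewListPager(nil)
	for pager.More() {
		if _, err := pager.NextPage(context.Background()); err != nil {
			t.Fatal(err)
		}
	}
}
```

The same Transport-option wiring applies to every fake server added in this diff; only the server type and transport constructor change.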
+type ResourceSKUsServer struct { + // NewListPager is the fake for method ResourceSKUsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armcompute.ResourceSKUsClientListOptions) (resp azfake.PagerResponder[armcompute.ResourceSKUsClientListResponse]) +} + +// NewResourceSKUsServerTransport creates a new instance of ResourceSKUsServerTransport with the provided implementation. +// The returned ResourceSKUsServerTransport instance is connected to an instance of armcompute.ResourceSKUsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewResourceSKUsServerTransport(srv *ResourceSKUsServer) *ResourceSKUsServerTransport { + return &ResourceSKUsServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armcompute.ResourceSKUsClientListResponse]](), + } +} + +// ResourceSKUsServerTransport connects instances of armcompute.ResourceSKUsClient to instances of ResourceSKUsServer. +// Don't use this type directly, use NewResourceSKUsServerTransport instead. +type ResourceSKUsServerTransport struct { + srv *ResourceSKUsServer + newListPager *tracker[azfake.PagerResponder[armcompute.ResourceSKUsClientListResponse]] +} + +// Do implements the policy.Transporter interface for ResourceSKUsServerTransport. +func (r *ResourceSKUsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ResourceSKUsClient.NewListPager": + resp, err = r.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (r *ResourceSKUsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if r.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := r.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/skus` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + filterUnescaped, err := url.QueryUnescape(qp.Get("$filter")) + if err != nil { + return nil, err + } + filterParam := getOptional(filterUnescaped) + includeExtendedLocationsUnescaped, err := url.QueryUnescape(qp.Get("includeExtendedLocations")) + if err != nil { + return nil, err + } + includeExtendedLocationsParam := getOptional(includeExtendedLocationsUnescaped) + var options *armcompute.ResourceSKUsClientListOptions + if filterParam != nil || includeExtendedLocationsParam != nil { + options = &armcompute.ResourceSKUsClientListOptions{ + Filter: filterParam, + IncludeExtendedLocations: includeExtendedLocationsParam, + } + } + resp := r.srv.NewListPager(options) + newListPager = &resp + r.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armcompute.ResourceSKUsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if 
err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + r.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + r.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/restorepointcollections_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/restorepointcollections_server.go new file mode 100644 index 00000000000..e5c22a23413 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/restorepointcollections_server.go @@ -0,0 +1,339 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// RestorePointCollectionsServer is a fake server for instances of the armcompute.RestorePointCollectionsClient type. +type RestorePointCollectionsServer struct { + // CreateOrUpdate is the fake for method RestorePointCollectionsClient.CreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + CreateOrUpdate func(ctx context.Context, resourceGroupName string, restorePointCollectionName string, parameters armcompute.RestorePointCollection, options *armcompute.RestorePointCollectionsClientCreateOrUpdateOptions) (resp azfake.Responder[armcompute.RestorePointCollectionsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method RestorePointCollectionsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, restorePointCollectionName string, options *armcompute.RestorePointCollectionsClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.RestorePointCollectionsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method RestorePointCollectionsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, restorePointCollectionName string, options *armcompute.RestorePointCollectionsClientGetOptions) (resp azfake.Responder[armcompute.RestorePointCollectionsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method RestorePointCollectionsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armcompute.RestorePointCollectionsClientListOptions) (resp azfake.PagerResponder[armcompute.RestorePointCollectionsClientListResponse]) + + // NewListAllPager is the fake for method RestorePointCollectionsClient.NewListAllPager + 
// HTTP status codes to indicate success: http.StatusOK + NewListAllPager func(options *armcompute.RestorePointCollectionsClientListAllOptions) (resp azfake.PagerResponder[armcompute.RestorePointCollectionsClientListAllResponse]) + + // Update is the fake for method RestorePointCollectionsClient.Update + // HTTP status codes to indicate success: http.StatusOK + Update func(ctx context.Context, resourceGroupName string, restorePointCollectionName string, parameters armcompute.RestorePointCollectionUpdate, options *armcompute.RestorePointCollectionsClientUpdateOptions) (resp azfake.Responder[armcompute.RestorePointCollectionsClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewRestorePointCollectionsServerTransport creates a new instance of RestorePointCollectionsServerTransport with the provided implementation. +// The returned RestorePointCollectionsServerTransport instance is connected to an instance of armcompute.RestorePointCollectionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewRestorePointCollectionsServerTransport(srv *RestorePointCollectionsServer) *RestorePointCollectionsServerTransport { + return &RestorePointCollectionsServerTransport{ + srv: srv, + beginDelete: newTracker[azfake.PollerResponder[armcompute.RestorePointCollectionsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armcompute.RestorePointCollectionsClientListResponse]](), + newListAllPager: newTracker[azfake.PagerResponder[armcompute.RestorePointCollectionsClientListAllResponse]](), + } +} + +// RestorePointCollectionsServerTransport connects instances of armcompute.RestorePointCollectionsClient to instances of RestorePointCollectionsServer. +// Don't use this type directly, use NewRestorePointCollectionsServerTransport instead. +type RestorePointCollectionsServerTransport struct { + srv *RestorePointCollectionsServer + beginDelete *tracker[azfake.PollerResponder[armcompute.RestorePointCollectionsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armcompute.RestorePointCollectionsClientListResponse]] + newListAllPager *tracker[azfake.PagerResponder[armcompute.RestorePointCollectionsClientListAllResponse]] +} + +// Do implements the policy.Transporter interface for RestorePointCollectionsServerTransport. 
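The single-shot operations above (CreateOrUpdate, Get, Update) are backed by azfake.Responder values: the test seeds a success with SetResponse, or drives the failure path through the ErrorResponder. A hedged sketch with only Get implemented — the helper name and resource values are illustrative, and the client-side wiring is the same as in the ResourceSKUs sketch earlier:

```go
package fake_test

import (
	"context"
	"net/http"

	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake"
)

// newRestorePointCollectionsFake implements only Get: known names echo back with
// a 200, while a sentinel name exercises the error path.
func newRestorePointCollectionsFake() fake.RestorePointCollectionsServer {
	return fake.RestorePointCollectionsServer{
		Get: func(ctx context.Context, resourceGroupName, restorePointCollectionName string,
			options *armcompute.RestorePointCollectionsClientGetOptions,
		) (resp azfake.Responder[armcompute.RestorePointCollectionsClientGetResponse], errResp azfake.ErrorResponder) {
			if restorePointCollectionName == "does-not-exist" {
				// Returned to the caller as a response error with this status and code.
				errResp.SetResponseError(http.StatusNotFound, "ResourceNotFound")
				return
			}
			resp.SetResponse(http.StatusOK, armcompute.RestorePointCollectionsClientGetResponse{
				RestorePointCollection: armcompute.RestorePointCollection{Name: to.Ptr(restorePointCollectionName)},
			}, nil)
			return
		},
	}
}
```

Wired through fake.NewRestorePointCollectionsServerTransport, a call such as client.Get(ctx, "rg", "rpc1", nil) then returns the seeded collection.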
+func (r *RestorePointCollectionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "RestorePointCollectionsClient.CreateOrUpdate": + resp, err = r.dispatchCreateOrUpdate(req) + case "RestorePointCollectionsClient.BeginDelete": + resp, err = r.dispatchBeginDelete(req) + case "RestorePointCollectionsClient.Get": + resp, err = r.dispatchGet(req) + case "RestorePointCollectionsClient.NewListPager": + resp, err = r.dispatchNewListPager(req) + case "RestorePointCollectionsClient.NewListAllPager": + resp, err = r.dispatchNewListAllPager(req) + case "RestorePointCollectionsClient.Update": + resp, err = r.dispatchUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (r *RestorePointCollectionsServerTransport) dispatchCreateOrUpdate(req *http.Request) (*http.Response, error) { + if r.srv.CreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method CreateOrUpdate not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/restorePointCollections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.RestorePointCollection](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + restorePointCollectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("restorePointCollectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := r.srv.CreateOrUpdate(req.Context(), resourceGroupNameParam, restorePointCollectionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusCreated}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).RestorePointCollection, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (r *RestorePointCollectionsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if r.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := r.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/restorePointCollections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + restorePointCollectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("restorePointCollectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := r.srv.BeginDelete(req.Context(), resourceGroupNameParam, restorePointCollectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + r.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + r.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + r.beginDelete.remove(req) + } + + return resp, nil +} + +func (r *RestorePointCollectionsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if r.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/restorePointCollections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + restorePointCollectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("restorePointCollectionName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(armcompute.RestorePointCollectionExpandOptions(expandUnescaped)) + var options *armcompute.RestorePointCollectionsClientGetOptions + if expandParam != nil { + options = &armcompute.RestorePointCollectionsClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := r.srv.Get(req.Context(), resourceGroupNameParam, restorePointCollectionNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).RestorePointCollection, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (r *RestorePointCollectionsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if r.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := r.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/restorePointCollections` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := r.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + r.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armcompute.RestorePointCollectionsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + r.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + r.newListPager.remove(req) + } + return resp, nil +} + +func (r *RestorePointCollectionsServerTransport) dispatchNewListAllPager(req *http.Request) (*http.Response, error) { + if r.srv.NewListAllPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAllPager not implemented")} + } + newListAllPager := r.newListAllPager.get(req) + if newListAllPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/restorePointCollections` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := r.srv.NewListAllPager(nil) + newListAllPager = &resp + r.newListAllPager.add(req, newListAllPager) + server.PagerResponderInjectNextLinks(newListAllPager, req, func(page *armcompute.RestorePointCollectionsClientListAllResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAllPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + r.newListAllPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAllPager) { + r.newListAllPager.remove(req) + } + return resp, nil +} + +func (r *RestorePointCollectionsServerTransport) dispatchUpdate(req *http.Request) (*http.Response, error) { + if r.srv.Update == nil { + return nil, &nonRetriableError{errors.New("fake for method Update not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/restorePointCollections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.RestorePointCollectionUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + restorePointCollectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("restorePointCollectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := r.srv.Update(req.Context(), resourceGroupNameParam, restorePointCollectionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).RestorePointCollection, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/restorepoints_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/restorepoints_server.go new file mode 100644 index 00000000000..6d636e309d9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/restorepoints_server.go @@ -0,0 +1,234 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// RestorePointsServer is a fake server for instances of the armcompute.RestorePointsClient type. 
+type RestorePointsServer struct { + // BeginCreate is the fake for method RestorePointsClient.BeginCreate + // HTTP status codes to indicate success: http.StatusCreated + BeginCreate func(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string, parameters armcompute.RestorePoint, options *armcompute.RestorePointsClientBeginCreateOptions) (resp azfake.PollerResponder[armcompute.RestorePointsClientCreateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method RestorePointsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string, options *armcompute.RestorePointsClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.RestorePointsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method RestorePointsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string, options *armcompute.RestorePointsClientGetOptions) (resp azfake.Responder[armcompute.RestorePointsClientGetResponse], errResp azfake.ErrorResponder) +} + +// NewRestorePointsServerTransport creates a new instance of RestorePointsServerTransport with the provided implementation. +// The returned RestorePointsServerTransport instance is connected to an instance of armcompute.RestorePointsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewRestorePointsServerTransport(srv *RestorePointsServer) *RestorePointsServerTransport { + return &RestorePointsServerTransport{ + srv: srv, + beginCreate: newTracker[azfake.PollerResponder[armcompute.RestorePointsClientCreateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armcompute.RestorePointsClientDeleteResponse]](), + } +} + +// RestorePointsServerTransport connects instances of armcompute.RestorePointsClient to instances of RestorePointsServer. +// Don't use this type directly, use NewRestorePointsServerTransport instead. +type RestorePointsServerTransport struct { + srv *RestorePointsServer + beginCreate *tracker[azfake.PollerResponder[armcompute.RestorePointsClientCreateResponse]] + beginDelete *tracker[azfake.PollerResponder[armcompute.RestorePointsClientDeleteResponse]] +} + +// Do implements the policy.Transporter interface for RestorePointsServerTransport. 
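BeginCreate and BeginDelete model long-running operations, so their fakes return an azfake.PollerResponder: each AddNonTerminalResponse queues one in-progress poll and SetTerminalResponse supplies the final result, which PollUntilDone on the client side eventually observes. A hedged sketch of a BeginDelete fake — the helper name and status-code choices are illustrative, and it is wired through fake.NewRestorePointsServerTransport as before:

```go
package fake_test

import (
	"context"
	"net/http"

	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake"
)

// newRestorePointsFake fakes only BeginDelete: two in-progress polls (202),
// then a terminal 204 — both codes are in dispatchBeginDelete's accepted set.
func newRestorePointsFake() fake.RestorePointsServer {
	return fake.RestorePointsServer{
		BeginDelete: func(ctx context.Context, resourceGroupName, restorePointCollectionName, restorePointName string,
			options *armcompute.RestorePointsClientBeginDeleteOptions,
		) (resp azfake.PollerResponder[armcompute.RestorePointsClientDeleteResponse], errResp azfake.ErrorResponder) {
			resp.AddNonTerminalResponse(http.StatusAccepted, nil)
			resp.AddNonTerminalResponse(http.StatusAccepted, nil)
			resp.SetTerminalResponse(http.StatusNoContent, armcompute.RestorePointsClientDeleteResponse{}, nil)
			return
		},
	}
}
```

A client calling BeginDelete followed by PollUntilDone would see the two intermediate polls before the operation completes.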
+func (r *RestorePointsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "RestorePointsClient.BeginCreate": + resp, err = r.dispatchBeginCreate(req) + case "RestorePointsClient.BeginDelete": + resp, err = r.dispatchBeginDelete(req) + case "RestorePointsClient.Get": + resp, err = r.dispatchGet(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (r *RestorePointsServerTransport) dispatchBeginCreate(req *http.Request) (*http.Response, error) { + if r.srv.BeginCreate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreate not implemented")} + } + beginCreate := r.beginCreate.get(req) + if beginCreate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/restorePointCollections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/restorePoints/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.RestorePoint](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + restorePointCollectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("restorePointCollectionName")]) + if err != nil { + return nil, err + } + restorePointNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("restorePointName")]) + if err != nil { + return nil, err + } + respr, errRespr := r.srv.BeginCreate(req.Context(), resourceGroupNameParam, restorePointCollectionNameParam, restorePointNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreate = &respr + r.beginCreate.add(req, beginCreate) + } + + resp, err := server.PollerResponderNext(beginCreate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusCreated}, resp.StatusCode) { + r.beginCreate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreate) { + r.beginCreate.remove(req) + } + + return resp, nil +} + +func (r *RestorePointsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if r.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := r.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/restorePointCollections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/restorePoints/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + restorePointCollectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("restorePointCollectionName")]) + if err != nil { + return nil, err + } + restorePointNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("restorePointName")]) + if err != nil { + return nil, err + } + respr, errRespr := r.srv.BeginDelete(req.Context(), resourceGroupNameParam, restorePointCollectionNameParam, restorePointNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + r.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + r.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + r.beginDelete.remove(req) + } + + return resp, nil +} + +func (r *RestorePointsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if r.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/restorePointCollections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/restorePoints/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + restorePointCollectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("restorePointCollectionName")]) + if err != nil { + return nil, err + } + restorePointNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("restorePointName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(armcompute.RestorePointExpandOptions(expandUnescaped)) + var options *armcompute.RestorePointsClientGetOptions + if expandParam != nil { + options = &armcompute.RestorePointsClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := r.srv.Get(req.Context(), resourceGroupNameParam, restorePointCollectionNameParam, restorePointNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).RestorePoint, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/server_factory.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/server_factory.go new file mode 100644 index 00000000000..4ec55a49480 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/server_factory.go @@ -0,0 +1,395 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "errors" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "strings" + "sync" +) + +// ServerFactory is a fake server for instances of the armcompute.ClientFactory type. 
+type ServerFactory struct { + AvailabilitySetsServer AvailabilitySetsServer + CapacityReservationGroupsServer CapacityReservationGroupsServer + CapacityReservationsServer CapacityReservationsServer + CloudServiceOperatingSystemsServer CloudServiceOperatingSystemsServer + CloudServiceRoleInstancesServer CloudServiceRoleInstancesServer + CloudServiceRolesServer CloudServiceRolesServer + CloudServicesServer CloudServicesServer + CloudServicesUpdateDomainServer CloudServicesUpdateDomainServer + CommunityGalleriesServer CommunityGalleriesServer + CommunityGalleryImageVersionsServer CommunityGalleryImageVersionsServer + CommunityGalleryImagesServer CommunityGalleryImagesServer + DedicatedHostGroupsServer DedicatedHostGroupsServer + DedicatedHostsServer DedicatedHostsServer + DiskAccessesServer DiskAccessesServer + DiskEncryptionSetsServer DiskEncryptionSetsServer + DiskRestorePointServer DiskRestorePointServer + DisksServer DisksServer + GalleriesServer GalleriesServer + GalleryApplicationVersionsServer GalleryApplicationVersionsServer + GalleryApplicationsServer GalleryApplicationsServer + GalleryImageVersionsServer GalleryImageVersionsServer + GalleryImagesServer GalleryImagesServer + GallerySharingProfileServer GallerySharingProfileServer + ImagesServer ImagesServer + LogAnalyticsServer LogAnalyticsServer + OperationsServer OperationsServer + ProximityPlacementGroupsServer ProximityPlacementGroupsServer + ResourceSKUsServer ResourceSKUsServer + RestorePointCollectionsServer RestorePointCollectionsServer + RestorePointsServer RestorePointsServer + SSHPublicKeysServer SSHPublicKeysServer + SharedGalleriesServer SharedGalleriesServer + SharedGalleryImageVersionsServer SharedGalleryImageVersionsServer + SharedGalleryImagesServer SharedGalleryImagesServer + SnapshotsServer SnapshotsServer + UsageServer UsageServer + VirtualMachineExtensionImagesServer VirtualMachineExtensionImagesServer + VirtualMachineExtensionsServer VirtualMachineExtensionsServer + VirtualMachineImagesServer VirtualMachineImagesServer + VirtualMachineImagesEdgeZoneServer VirtualMachineImagesEdgeZoneServer + VirtualMachineRunCommandsServer VirtualMachineRunCommandsServer + VirtualMachineScaleSetExtensionsServer VirtualMachineScaleSetExtensionsServer + VirtualMachineScaleSetRollingUpgradesServer VirtualMachineScaleSetRollingUpgradesServer + VirtualMachineScaleSetVMExtensionsServer VirtualMachineScaleSetVMExtensionsServer + VirtualMachineScaleSetVMRunCommandsServer VirtualMachineScaleSetVMRunCommandsServer + VirtualMachineScaleSetVMsServer VirtualMachineScaleSetVMsServer + VirtualMachineScaleSetsServer VirtualMachineScaleSetsServer + VirtualMachineSizesServer VirtualMachineSizesServer + VirtualMachinesServer VirtualMachinesServer +} + +// NewServerFactoryTransport creates a new instance of ServerFactoryTransport with the provided implementation. +// The returned ServerFactoryTransport instance is connected to an instance of armcompute.ClientFactory via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewServerFactoryTransport(srv *ServerFactory) *ServerFactoryTransport { + return &ServerFactoryTransport{ + srv: srv, + } +} + +// ServerFactoryTransport connects instances of armcompute.ClientFactory to instances of ServerFactory. +// Don't use this type directly, use NewServerFactoryTransport instead. 
+type ServerFactoryTransport struct { + srv *ServerFactory + trMu sync.Mutex + trAvailabilitySetsServer *AvailabilitySetsServerTransport + trCapacityReservationGroupsServer *CapacityReservationGroupsServerTransport + trCapacityReservationsServer *CapacityReservationsServerTransport + trCloudServiceOperatingSystemsServer *CloudServiceOperatingSystemsServerTransport + trCloudServiceRoleInstancesServer *CloudServiceRoleInstancesServerTransport + trCloudServiceRolesServer *CloudServiceRolesServerTransport + trCloudServicesServer *CloudServicesServerTransport + trCloudServicesUpdateDomainServer *CloudServicesUpdateDomainServerTransport + trCommunityGalleriesServer *CommunityGalleriesServerTransport + trCommunityGalleryImageVersionsServer *CommunityGalleryImageVersionsServerTransport + trCommunityGalleryImagesServer *CommunityGalleryImagesServerTransport + trDedicatedHostGroupsServer *DedicatedHostGroupsServerTransport + trDedicatedHostsServer *DedicatedHostsServerTransport + trDiskAccessesServer *DiskAccessesServerTransport + trDiskEncryptionSetsServer *DiskEncryptionSetsServerTransport + trDiskRestorePointServer *DiskRestorePointServerTransport + trDisksServer *DisksServerTransport + trGalleriesServer *GalleriesServerTransport + trGalleryApplicationVersionsServer *GalleryApplicationVersionsServerTransport + trGalleryApplicationsServer *GalleryApplicationsServerTransport + trGalleryImageVersionsServer *GalleryImageVersionsServerTransport + trGalleryImagesServer *GalleryImagesServerTransport + trGallerySharingProfileServer *GallerySharingProfileServerTransport + trImagesServer *ImagesServerTransport + trLogAnalyticsServer *LogAnalyticsServerTransport + trOperationsServer *OperationsServerTransport + trProximityPlacementGroupsServer *ProximityPlacementGroupsServerTransport + trResourceSKUsServer *ResourceSKUsServerTransport + trRestorePointCollectionsServer *RestorePointCollectionsServerTransport + trRestorePointsServer *RestorePointsServerTransport + trSSHPublicKeysServer *SSHPublicKeysServerTransport + trSharedGalleriesServer *SharedGalleriesServerTransport + trSharedGalleryImageVersionsServer *SharedGalleryImageVersionsServerTransport + trSharedGalleryImagesServer *SharedGalleryImagesServerTransport + trSnapshotsServer *SnapshotsServerTransport + trUsageServer *UsageServerTransport + trVirtualMachineExtensionImagesServer *VirtualMachineExtensionImagesServerTransport + trVirtualMachineExtensionsServer *VirtualMachineExtensionsServerTransport + trVirtualMachineImagesServer *VirtualMachineImagesServerTransport + trVirtualMachineImagesEdgeZoneServer *VirtualMachineImagesEdgeZoneServerTransport + trVirtualMachineRunCommandsServer *VirtualMachineRunCommandsServerTransport + trVirtualMachineScaleSetExtensionsServer *VirtualMachineScaleSetExtensionsServerTransport + trVirtualMachineScaleSetRollingUpgradesServer *VirtualMachineScaleSetRollingUpgradesServerTransport + trVirtualMachineScaleSetVMExtensionsServer *VirtualMachineScaleSetVMExtensionsServerTransport + trVirtualMachineScaleSetVMRunCommandsServer *VirtualMachineScaleSetVMRunCommandsServerTransport + trVirtualMachineScaleSetVMsServer *VirtualMachineScaleSetVMsServerTransport + trVirtualMachineScaleSetsServer *VirtualMachineScaleSetsServerTransport + trVirtualMachineSizesServer *VirtualMachineSizesServerTransport + trVirtualMachinesServer *VirtualMachinesServerTransport +} + +// Do implements the policy.Transporter interface for ServerFactoryTransport. 
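When a test exercises several armcompute clients at once, it can populate a single ServerFactory and hand NewServerFactoryTransport to armcompute.NewClientFactory; the Do method that follows then picks the per-client transport from the request's API name, constructing it lazily on first use. A hedged wiring sketch — the helper name and subscription ID are placeholders:

```go
package fake_test

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake"
)

// newFakeClientFactory puts one ServerFactory behind every armcompute client;
// only the fakes the test actually populates on srv need to be implemented.
func newFakeClientFactory(srv *fake.ServerFactory) (*armcompute.ClientFactory, error) {
	return armcompute.NewClientFactory("subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{
			Transport: fake.NewServerFactoryTransport(srv),
		},
	})
}
```

Clients obtained from the factory (for example its ResourceSKUs and RestorePoints clients) then share the same fake transport, so a single test can cover multi-client flows.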
+func (s *ServerFactoryTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + client := method[:strings.Index(method, ".")] + var resp *http.Response + var err error + + switch client { + case "AvailabilitySetsClient": + initServer(s, &s.trAvailabilitySetsServer, func() *AvailabilitySetsServerTransport { + return NewAvailabilitySetsServerTransport(&s.srv.AvailabilitySetsServer) + }) + resp, err = s.trAvailabilitySetsServer.Do(req) + case "CapacityReservationGroupsClient": + initServer(s, &s.trCapacityReservationGroupsServer, func() *CapacityReservationGroupsServerTransport { + return NewCapacityReservationGroupsServerTransport(&s.srv.CapacityReservationGroupsServer) + }) + resp, err = s.trCapacityReservationGroupsServer.Do(req) + case "CapacityReservationsClient": + initServer(s, &s.trCapacityReservationsServer, func() *CapacityReservationsServerTransport { + return NewCapacityReservationsServerTransport(&s.srv.CapacityReservationsServer) + }) + resp, err = s.trCapacityReservationsServer.Do(req) + case "CloudServiceOperatingSystemsClient": + initServer(s, &s.trCloudServiceOperatingSystemsServer, func() *CloudServiceOperatingSystemsServerTransport { + return NewCloudServiceOperatingSystemsServerTransport(&s.srv.CloudServiceOperatingSystemsServer) + }) + resp, err = s.trCloudServiceOperatingSystemsServer.Do(req) + case "CloudServiceRoleInstancesClient": + initServer(s, &s.trCloudServiceRoleInstancesServer, func() *CloudServiceRoleInstancesServerTransport { + return NewCloudServiceRoleInstancesServerTransport(&s.srv.CloudServiceRoleInstancesServer) + }) + resp, err = s.trCloudServiceRoleInstancesServer.Do(req) + case "CloudServiceRolesClient": + initServer(s, &s.trCloudServiceRolesServer, func() *CloudServiceRolesServerTransport { + return NewCloudServiceRolesServerTransport(&s.srv.CloudServiceRolesServer) + }) + resp, err = s.trCloudServiceRolesServer.Do(req) + case "CloudServicesClient": + initServer(s, &s.trCloudServicesServer, func() *CloudServicesServerTransport { + return NewCloudServicesServerTransport(&s.srv.CloudServicesServer) + }) + resp, err = s.trCloudServicesServer.Do(req) + case "CloudServicesUpdateDomainClient": + initServer(s, &s.trCloudServicesUpdateDomainServer, func() *CloudServicesUpdateDomainServerTransport { + return NewCloudServicesUpdateDomainServerTransport(&s.srv.CloudServicesUpdateDomainServer) + }) + resp, err = s.trCloudServicesUpdateDomainServer.Do(req) + case "CommunityGalleriesClient": + initServer(s, &s.trCommunityGalleriesServer, func() *CommunityGalleriesServerTransport { + return NewCommunityGalleriesServerTransport(&s.srv.CommunityGalleriesServer) + }) + resp, err = s.trCommunityGalleriesServer.Do(req) + case "CommunityGalleryImageVersionsClient": + initServer(s, &s.trCommunityGalleryImageVersionsServer, func() *CommunityGalleryImageVersionsServerTransport { + return NewCommunityGalleryImageVersionsServerTransport(&s.srv.CommunityGalleryImageVersionsServer) + }) + resp, err = s.trCommunityGalleryImageVersionsServer.Do(req) + case "CommunityGalleryImagesClient": + initServer(s, &s.trCommunityGalleryImagesServer, func() *CommunityGalleryImagesServerTransport { + return NewCommunityGalleryImagesServerTransport(&s.srv.CommunityGalleryImagesServer) + }) + resp, err = s.trCommunityGalleryImagesServer.Do(req) + case 
"DedicatedHostGroupsClient": + initServer(s, &s.trDedicatedHostGroupsServer, func() *DedicatedHostGroupsServerTransport { + return NewDedicatedHostGroupsServerTransport(&s.srv.DedicatedHostGroupsServer) + }) + resp, err = s.trDedicatedHostGroupsServer.Do(req) + case "DedicatedHostsClient": + initServer(s, &s.trDedicatedHostsServer, func() *DedicatedHostsServerTransport { + return NewDedicatedHostsServerTransport(&s.srv.DedicatedHostsServer) + }) + resp, err = s.trDedicatedHostsServer.Do(req) + case "DiskAccessesClient": + initServer(s, &s.trDiskAccessesServer, func() *DiskAccessesServerTransport { return NewDiskAccessesServerTransport(&s.srv.DiskAccessesServer) }) + resp, err = s.trDiskAccessesServer.Do(req) + case "DiskEncryptionSetsClient": + initServer(s, &s.trDiskEncryptionSetsServer, func() *DiskEncryptionSetsServerTransport { + return NewDiskEncryptionSetsServerTransport(&s.srv.DiskEncryptionSetsServer) + }) + resp, err = s.trDiskEncryptionSetsServer.Do(req) + case "DiskRestorePointClient": + initServer(s, &s.trDiskRestorePointServer, func() *DiskRestorePointServerTransport { + return NewDiskRestorePointServerTransport(&s.srv.DiskRestorePointServer) + }) + resp, err = s.trDiskRestorePointServer.Do(req) + case "DisksClient": + initServer(s, &s.trDisksServer, func() *DisksServerTransport { return NewDisksServerTransport(&s.srv.DisksServer) }) + resp, err = s.trDisksServer.Do(req) + case "GalleriesClient": + initServer(s, &s.trGalleriesServer, func() *GalleriesServerTransport { return NewGalleriesServerTransport(&s.srv.GalleriesServer) }) + resp, err = s.trGalleriesServer.Do(req) + case "GalleryApplicationVersionsClient": + initServer(s, &s.trGalleryApplicationVersionsServer, func() *GalleryApplicationVersionsServerTransport { + return NewGalleryApplicationVersionsServerTransport(&s.srv.GalleryApplicationVersionsServer) + }) + resp, err = s.trGalleryApplicationVersionsServer.Do(req) + case "GalleryApplicationsClient": + initServer(s, &s.trGalleryApplicationsServer, func() *GalleryApplicationsServerTransport { + return NewGalleryApplicationsServerTransport(&s.srv.GalleryApplicationsServer) + }) + resp, err = s.trGalleryApplicationsServer.Do(req) + case "GalleryImageVersionsClient": + initServer(s, &s.trGalleryImageVersionsServer, func() *GalleryImageVersionsServerTransport { + return NewGalleryImageVersionsServerTransport(&s.srv.GalleryImageVersionsServer) + }) + resp, err = s.trGalleryImageVersionsServer.Do(req) + case "GalleryImagesClient": + initServer(s, &s.trGalleryImagesServer, func() *GalleryImagesServerTransport { + return NewGalleryImagesServerTransport(&s.srv.GalleryImagesServer) + }) + resp, err = s.trGalleryImagesServer.Do(req) + case "GallerySharingProfileClient": + initServer(s, &s.trGallerySharingProfileServer, func() *GallerySharingProfileServerTransport { + return NewGallerySharingProfileServerTransport(&s.srv.GallerySharingProfileServer) + }) + resp, err = s.trGallerySharingProfileServer.Do(req) + case "ImagesClient": + initServer(s, &s.trImagesServer, func() *ImagesServerTransport { return NewImagesServerTransport(&s.srv.ImagesServer) }) + resp, err = s.trImagesServer.Do(req) + case "LogAnalyticsClient": + initServer(s, &s.trLogAnalyticsServer, func() *LogAnalyticsServerTransport { return NewLogAnalyticsServerTransport(&s.srv.LogAnalyticsServer) }) + resp, err = s.trLogAnalyticsServer.Do(req) + case "OperationsClient": + initServer(s, &s.trOperationsServer, func() *OperationsServerTransport { return NewOperationsServerTransport(&s.srv.OperationsServer) }) + resp, err = 
s.trOperationsServer.Do(req) + case "ProximityPlacementGroupsClient": + initServer(s, &s.trProximityPlacementGroupsServer, func() *ProximityPlacementGroupsServerTransport { + return NewProximityPlacementGroupsServerTransport(&s.srv.ProximityPlacementGroupsServer) + }) + resp, err = s.trProximityPlacementGroupsServer.Do(req) + case "ResourceSKUsClient": + initServer(s, &s.trResourceSKUsServer, func() *ResourceSKUsServerTransport { return NewResourceSKUsServerTransport(&s.srv.ResourceSKUsServer) }) + resp, err = s.trResourceSKUsServer.Do(req) + case "RestorePointCollectionsClient": + initServer(s, &s.trRestorePointCollectionsServer, func() *RestorePointCollectionsServerTransport { + return NewRestorePointCollectionsServerTransport(&s.srv.RestorePointCollectionsServer) + }) + resp, err = s.trRestorePointCollectionsServer.Do(req) + case "RestorePointsClient": + initServer(s, &s.trRestorePointsServer, func() *RestorePointsServerTransport { + return NewRestorePointsServerTransport(&s.srv.RestorePointsServer) + }) + resp, err = s.trRestorePointsServer.Do(req) + case "SSHPublicKeysClient": + initServer(s, &s.trSSHPublicKeysServer, func() *SSHPublicKeysServerTransport { + return NewSSHPublicKeysServerTransport(&s.srv.SSHPublicKeysServer) + }) + resp, err = s.trSSHPublicKeysServer.Do(req) + case "SharedGalleriesClient": + initServer(s, &s.trSharedGalleriesServer, func() *SharedGalleriesServerTransport { + return NewSharedGalleriesServerTransport(&s.srv.SharedGalleriesServer) + }) + resp, err = s.trSharedGalleriesServer.Do(req) + case "SharedGalleryImageVersionsClient": + initServer(s, &s.trSharedGalleryImageVersionsServer, func() *SharedGalleryImageVersionsServerTransport { + return NewSharedGalleryImageVersionsServerTransport(&s.srv.SharedGalleryImageVersionsServer) + }) + resp, err = s.trSharedGalleryImageVersionsServer.Do(req) + case "SharedGalleryImagesClient": + initServer(s, &s.trSharedGalleryImagesServer, func() *SharedGalleryImagesServerTransport { + return NewSharedGalleryImagesServerTransport(&s.srv.SharedGalleryImagesServer) + }) + resp, err = s.trSharedGalleryImagesServer.Do(req) + case "SnapshotsClient": + initServer(s, &s.trSnapshotsServer, func() *SnapshotsServerTransport { return NewSnapshotsServerTransport(&s.srv.SnapshotsServer) }) + resp, err = s.trSnapshotsServer.Do(req) + case "UsageClient": + initServer(s, &s.trUsageServer, func() *UsageServerTransport { return NewUsageServerTransport(&s.srv.UsageServer) }) + resp, err = s.trUsageServer.Do(req) + case "VirtualMachineExtensionImagesClient": + initServer(s, &s.trVirtualMachineExtensionImagesServer, func() *VirtualMachineExtensionImagesServerTransport { + return NewVirtualMachineExtensionImagesServerTransport(&s.srv.VirtualMachineExtensionImagesServer) + }) + resp, err = s.trVirtualMachineExtensionImagesServer.Do(req) + case "VirtualMachineExtensionsClient": + initServer(s, &s.trVirtualMachineExtensionsServer, func() *VirtualMachineExtensionsServerTransport { + return NewVirtualMachineExtensionsServerTransport(&s.srv.VirtualMachineExtensionsServer) + }) + resp, err = s.trVirtualMachineExtensionsServer.Do(req) + case "VirtualMachineImagesClient": + initServer(s, &s.trVirtualMachineImagesServer, func() *VirtualMachineImagesServerTransport { + return NewVirtualMachineImagesServerTransport(&s.srv.VirtualMachineImagesServer) + }) + resp, err = s.trVirtualMachineImagesServer.Do(req) + case "VirtualMachineImagesEdgeZoneClient": + initServer(s, &s.trVirtualMachineImagesEdgeZoneServer, func() *VirtualMachineImagesEdgeZoneServerTransport { 
+ return NewVirtualMachineImagesEdgeZoneServerTransport(&s.srv.VirtualMachineImagesEdgeZoneServer) + }) + resp, err = s.trVirtualMachineImagesEdgeZoneServer.Do(req) + case "VirtualMachineRunCommandsClient": + initServer(s, &s.trVirtualMachineRunCommandsServer, func() *VirtualMachineRunCommandsServerTransport { + return NewVirtualMachineRunCommandsServerTransport(&s.srv.VirtualMachineRunCommandsServer) + }) + resp, err = s.trVirtualMachineRunCommandsServer.Do(req) + case "VirtualMachineScaleSetExtensionsClient": + initServer(s, &s.trVirtualMachineScaleSetExtensionsServer, func() *VirtualMachineScaleSetExtensionsServerTransport { + return NewVirtualMachineScaleSetExtensionsServerTransport(&s.srv.VirtualMachineScaleSetExtensionsServer) + }) + resp, err = s.trVirtualMachineScaleSetExtensionsServer.Do(req) + case "VirtualMachineScaleSetRollingUpgradesClient": + initServer(s, &s.trVirtualMachineScaleSetRollingUpgradesServer, func() *VirtualMachineScaleSetRollingUpgradesServerTransport { + return NewVirtualMachineScaleSetRollingUpgradesServerTransport(&s.srv.VirtualMachineScaleSetRollingUpgradesServer) + }) + resp, err = s.trVirtualMachineScaleSetRollingUpgradesServer.Do(req) + case "VirtualMachineScaleSetVMExtensionsClient": + initServer(s, &s.trVirtualMachineScaleSetVMExtensionsServer, func() *VirtualMachineScaleSetVMExtensionsServerTransport { + return NewVirtualMachineScaleSetVMExtensionsServerTransport(&s.srv.VirtualMachineScaleSetVMExtensionsServer) + }) + resp, err = s.trVirtualMachineScaleSetVMExtensionsServer.Do(req) + case "VirtualMachineScaleSetVMRunCommandsClient": + initServer(s, &s.trVirtualMachineScaleSetVMRunCommandsServer, func() *VirtualMachineScaleSetVMRunCommandsServerTransport { + return NewVirtualMachineScaleSetVMRunCommandsServerTransport(&s.srv.VirtualMachineScaleSetVMRunCommandsServer) + }) + resp, err = s.trVirtualMachineScaleSetVMRunCommandsServer.Do(req) + case "VirtualMachineScaleSetVMsClient": + initServer(s, &s.trVirtualMachineScaleSetVMsServer, func() *VirtualMachineScaleSetVMsServerTransport { + return NewVirtualMachineScaleSetVMsServerTransport(&s.srv.VirtualMachineScaleSetVMsServer) + }) + resp, err = s.trVirtualMachineScaleSetVMsServer.Do(req) + case "VirtualMachineScaleSetsClient": + initServer(s, &s.trVirtualMachineScaleSetsServer, func() *VirtualMachineScaleSetsServerTransport { + return NewVirtualMachineScaleSetsServerTransport(&s.srv.VirtualMachineScaleSetsServer) + }) + resp, err = s.trVirtualMachineScaleSetsServer.Do(req) + case "VirtualMachineSizesClient": + initServer(s, &s.trVirtualMachineSizesServer, func() *VirtualMachineSizesServerTransport { + return NewVirtualMachineSizesServerTransport(&s.srv.VirtualMachineSizesServer) + }) + resp, err = s.trVirtualMachineSizesServer.Do(req) + case "VirtualMachinesClient": + initServer(s, &s.trVirtualMachinesServer, func() *VirtualMachinesServerTransport { + return NewVirtualMachinesServerTransport(&s.srv.VirtualMachinesServer) + }) + resp, err = s.trVirtualMachinesServer.Do(req) + default: + err = fmt.Errorf("unhandled client %s", client) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func initServer[T any](s *ServerFactoryTransport, dst **T, src func() *T) { + s.trMu.Lock() + if *dst == nil { + *dst = src() + } + s.trMu.Unlock() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/sharedgalleries_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/sharedgalleries_server.go new file mode 
100644 index 00000000000..6c67868890d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/sharedgalleries_server.go @@ -0,0 +1,160 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// SharedGalleriesServer is a fake server for instances of the armcompute.SharedGalleriesClient type. +type SharedGalleriesServer struct { + // Get is the fake for method SharedGalleriesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, location string, galleryUniqueName string, options *armcompute.SharedGalleriesClientGetOptions) (resp azfake.Responder[armcompute.SharedGalleriesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method SharedGalleriesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(location string, options *armcompute.SharedGalleriesClientListOptions) (resp azfake.PagerResponder[armcompute.SharedGalleriesClientListResponse]) +} + +// NewSharedGalleriesServerTransport creates a new instance of SharedGalleriesServerTransport with the provided implementation. +// The returned SharedGalleriesServerTransport instance is connected to an instance of armcompute.SharedGalleriesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewSharedGalleriesServerTransport(srv *SharedGalleriesServer) *SharedGalleriesServerTransport { + return &SharedGalleriesServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armcompute.SharedGalleriesClientListResponse]](), + } +} + +// SharedGalleriesServerTransport connects instances of armcompute.SharedGalleriesClient to instances of SharedGalleriesServer. +// Don't use this type directly, use NewSharedGalleriesServerTransport instead. +type SharedGalleriesServerTransport struct { + srv *SharedGalleriesServer + newListPager *tracker[azfake.PagerResponder[armcompute.SharedGalleriesClientListResponse]] +} + +// Do implements the policy.Transporter interface for SharedGalleriesServerTransport. 
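For pager fakes such as NewListPager above, each AddPage call becomes one page returned to the client; the transport injects next links (PagerResponderInjectNextLinks) so the real pager keeps iterating until the responder is exhausted. A hedged sketch that serves two pages — the helper name and gallery names are placeholders, and the model field layout is written from memory of the armcompute types rather than taken from this diff:

```go
package fake_test

import (
	"net/http"

	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake"
)

// newSharedGalleriesFake returns two pages of one gallery each; wire it through
// fake.NewSharedGalleriesServerTransport as in the earlier wiring sketch.
func newSharedGalleriesFake() fake.SharedGalleriesServer {
	return fake.SharedGalleriesServer{
		NewListPager: func(location string, options *armcompute.SharedGalleriesClientListOptions) (resp azfake.PagerResponder[armcompute.SharedGalleriesClientListResponse]) {
			resp.AddPage(http.StatusOK, armcompute.SharedGalleriesClientListResponse{
				SharedGalleryList: armcompute.SharedGalleryList{
					Value: []*armcompute.SharedGallery{{Name: to.Ptr("gallery-1")}},
				},
			}, nil)
			resp.AddPage(http.StatusOK, armcompute.SharedGalleriesClientListResponse{
				SharedGalleryList: armcompute.SharedGalleryList{
					Value: []*armcompute.SharedGallery{{Name: to.Ptr("gallery-2")}},
				},
			}, nil)
			return
		},
	}
}
```

A pager created from the real client with NewListPager("eastus", nil) would then yield gallery-1 and gallery-2 across two NextPage calls.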
+func (s *SharedGalleriesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "SharedGalleriesClient.Get": + resp, err = s.dispatchGet(req) + case "SharedGalleriesClient.NewListPager": + resp, err = s.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (s *SharedGalleriesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if s.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/sharedGalleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + galleryUniqueNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryUniqueName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.Get(req.Context(), locationParam, galleryUniqueNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).SharedGallery, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *SharedGalleriesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if s.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := s.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/sharedGalleries` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + sharedToUnescaped, err := url.QueryUnescape(qp.Get("sharedTo")) + if err != nil { + return nil, err + } + sharedToParam := getOptional(armcompute.SharedToValues(sharedToUnescaped)) + var options *armcompute.SharedGalleriesClientListOptions + if sharedToParam != nil { + options = &armcompute.SharedGalleriesClientListOptions{ + SharedTo: sharedToParam, + } + } + resp := s.srv.NewListPager(locationParam, options) + newListPager = &resp + s.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armcompute.SharedGalleriesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + s.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + s.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/sharedgalleryimages_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/sharedgalleryimages_server.go new file mode 100644 index 00000000000..ad5d085f225 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/sharedgalleryimages_server.go @@ -0,0 +1,168 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// SharedGalleryImagesServer is a fake server for instances of the armcompute.SharedGalleryImagesClient type. 
+type SharedGalleryImagesServer struct { + // Get is the fake for method SharedGalleryImagesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, location string, galleryUniqueName string, galleryImageName string, options *armcompute.SharedGalleryImagesClientGetOptions) (resp azfake.Responder[armcompute.SharedGalleryImagesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method SharedGalleryImagesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(location string, galleryUniqueName string, options *armcompute.SharedGalleryImagesClientListOptions) (resp azfake.PagerResponder[armcompute.SharedGalleryImagesClientListResponse]) +} + +// NewSharedGalleryImagesServerTransport creates a new instance of SharedGalleryImagesServerTransport with the provided implementation. +// The returned SharedGalleryImagesServerTransport instance is connected to an instance of armcompute.SharedGalleryImagesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewSharedGalleryImagesServerTransport(srv *SharedGalleryImagesServer) *SharedGalleryImagesServerTransport { + return &SharedGalleryImagesServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armcompute.SharedGalleryImagesClientListResponse]](), + } +} + +// SharedGalleryImagesServerTransport connects instances of armcompute.SharedGalleryImagesClient to instances of SharedGalleryImagesServer. +// Don't use this type directly, use NewSharedGalleryImagesServerTransport instead. +type SharedGalleryImagesServerTransport struct { + srv *SharedGalleryImagesServer + newListPager *tracker[azfake.PagerResponder[armcompute.SharedGalleryImagesClientListResponse]] +} + +// Do implements the policy.Transporter interface for SharedGalleryImagesServerTransport. 
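A sketch, under the same assumptions as the earlier SharedGalleries example, of how a paged fake is usually populated: each azfake.PagerResponder.AddPage call becomes one page, and the nextLink injection performed by the transport below stitches the pages together for the client-side pager.

package armcomputefake_test

import (
	"context"
	"net/http"
	"testing"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake"
)

func TestSharedGalleryImagesList_Fake(t *testing.T) {
	srv := fake.SharedGalleryImagesServer{
		NewListPager: func(location, galleryUniqueName string, options *armcompute.SharedGalleryImagesClientListOptions) (resp azfake.PagerResponder[armcompute.SharedGalleryImagesClientListResponse]) {
			// Two pages; NextLink is filled in by the transport, not here.
			for _, name := range []string{"image-1", "image-2"} {
				resp.AddPage(http.StatusOK, armcompute.SharedGalleryImagesClientListResponse{
					SharedGalleryImageList: armcompute.SharedGalleryImageList{
						Value: []*armcompute.SharedGalleryImage{{Name: to.Ptr(name)}},
					},
				}, nil)
			}
			return
		},
	}

	client, err := armcompute.NewSharedGalleryImagesClient("00000000-0000-0000-0000-000000000000", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transport: fake.NewSharedGalleryImagesServerTransport(&srv)},
	})
	if err != nil {
		t.Fatal(err)
	}

	var images int
	pager := client.NewListPager("westus", "myGallery", nil)
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		if err != nil {
			t.Fatal(err)
		}
		images += len(page.Value)
	}
	if images != 2 {
		t.Fatalf("expected 2 images, got %d", images)
	}
}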
+func (s *SharedGalleryImagesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "SharedGalleryImagesClient.Get": + resp, err = s.dispatchGet(req) + case "SharedGalleryImagesClient.NewListPager": + resp, err = s.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (s *SharedGalleryImagesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if s.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/sharedGalleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/images/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + galleryUniqueNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryUniqueName")]) + if err != nil { + return nil, err + } + galleryImageNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryImageName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.Get(req.Context(), locationParam, galleryUniqueNameParam, galleryImageNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).SharedGalleryImage, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *SharedGalleryImagesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if s.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := s.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/sharedGalleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/images` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + galleryUniqueNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryUniqueName")]) + if err != nil { + return nil, err + } + sharedToUnescaped, err := url.QueryUnescape(qp.Get("sharedTo")) + if err != nil { + return nil, err + } + sharedToParam := getOptional(armcompute.SharedToValues(sharedToUnescaped)) + var options *armcompute.SharedGalleryImagesClientListOptions + if sharedToParam != nil { + options = &armcompute.SharedGalleryImagesClientListOptions{ + SharedTo: sharedToParam, + } + } + resp := s.srv.NewListPager(locationParam, galleryUniqueNameParam, options) + newListPager = &resp + s.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armcompute.SharedGalleryImagesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + s.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + s.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/sharedgalleryimageversions_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/sharedgalleryimageversions_server.go new file mode 100644 index 00000000000..20621fc2c6c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/sharedgalleryimageversions_server.go @@ -0,0 +1,176 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// SharedGalleryImageVersionsServer is a fake server for instances of the armcompute.SharedGalleryImageVersionsClient type. +type SharedGalleryImageVersionsServer struct { + // Get is the fake for method SharedGalleryImageVersionsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, location string, galleryUniqueName string, galleryImageName string, galleryImageVersionName string, options *armcompute.SharedGalleryImageVersionsClientGetOptions) (resp azfake.Responder[armcompute.SharedGalleryImageVersionsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method SharedGalleryImageVersionsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(location string, galleryUniqueName string, galleryImageName string, options *armcompute.SharedGalleryImageVersionsClientListOptions) (resp azfake.PagerResponder[armcompute.SharedGalleryImageVersionsClientListResponse]) +} + +// NewSharedGalleryImageVersionsServerTransport creates a new instance of SharedGalleryImageVersionsServerTransport with the provided implementation. +// The returned SharedGalleryImageVersionsServerTransport instance is connected to an instance of armcompute.SharedGalleryImageVersionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewSharedGalleryImageVersionsServerTransport(srv *SharedGalleryImageVersionsServer) *SharedGalleryImageVersionsServerTransport { + return &SharedGalleryImageVersionsServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armcompute.SharedGalleryImageVersionsClientListResponse]](), + } +} + +// SharedGalleryImageVersionsServerTransport connects instances of armcompute.SharedGalleryImageVersionsClient to instances of SharedGalleryImageVersionsServer. +// Don't use this type directly, use NewSharedGalleryImageVersionsServerTransport instead. +type SharedGalleryImageVersionsServerTransport struct { + srv *SharedGalleryImageVersionsServer + newListPager *tracker[azfake.PagerResponder[armcompute.SharedGalleryImageVersionsClientListResponse]] +} + +// Do implements the policy.Transporter interface for SharedGalleryImageVersionsServerTransport. 
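Error paths go through the azfake.ErrorResponder instead of the Responder. A sketch, assuming SetResponseError is available in azcore/fake as documented, of how a fake Get can return a service-style error that surfaces on the client as *azcore.ResponseError; names and values are placeholders.

package armcomputefake_test

import (
	"context"
	"errors"
	"net/http"
	"testing"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake"
)

func TestSharedGalleryImageVersionsGet_NotFound(t *testing.T) {
	srv := fake.SharedGalleryImageVersionsServer{
		Get: func(ctx context.Context, location, galleryUniqueName, galleryImageName, galleryImageVersionName string, options *armcompute.SharedGalleryImageVersionsClientGetOptions) (resp azfake.Responder[armcompute.SharedGalleryImageVersionsClientGetResponse], errResp azfake.ErrorResponder) {
			// Leave resp empty and populate the error responder instead.
			errResp.SetResponseError(http.StatusNotFound, "NotFound")
			return
		},
	}

	client, err := armcompute.NewSharedGalleryImageVersionsClient("00000000-0000-0000-0000-000000000000", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transport: fake.NewSharedGalleryImageVersionsServerTransport(&srv)},
	})
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Get(context.Background(), "westus", "myGallery", "myImage", "1.0.0", nil)
	var respErr *azcore.ResponseError
	if !errors.As(err, &respErr) || respErr.StatusCode != http.StatusNotFound {
		t.Fatalf("expected a 404 ResponseError, got %v", err)
	}
}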
+func (s *SharedGalleryImageVersionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "SharedGalleryImageVersionsClient.Get": + resp, err = s.dispatchGet(req) + case "SharedGalleryImageVersionsClient.NewListPager": + resp, err = s.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (s *SharedGalleryImageVersionsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if s.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/sharedGalleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/images/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/versions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + galleryUniqueNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryUniqueName")]) + if err != nil { + return nil, err + } + galleryImageNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryImageName")]) + if err != nil { + return nil, err + } + galleryImageVersionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryImageVersionName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.Get(req.Context(), locationParam, galleryUniqueNameParam, galleryImageNameParam, galleryImageVersionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).SharedGalleryImageVersion, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *SharedGalleryImageVersionsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if s.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := s.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/sharedGalleries/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/images/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/versions` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + galleryUniqueNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryUniqueName")]) + if err != nil { + return nil, err + } + galleryImageNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("galleryImageName")]) + if err != nil { + return nil, err + } + sharedToUnescaped, err := url.QueryUnescape(qp.Get("sharedTo")) + if err != nil { + return nil, err + } + sharedToParam := getOptional(armcompute.SharedToValues(sharedToUnescaped)) + var options *armcompute.SharedGalleryImageVersionsClientListOptions + if sharedToParam != nil { + options = &armcompute.SharedGalleryImageVersionsClientListOptions{ + SharedTo: sharedToParam, + } + } + resp := s.srv.NewListPager(locationParam, galleryUniqueNameParam, galleryImageNameParam, options) + newListPager = &resp + s.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armcompute.SharedGalleryImageVersionsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + s.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + s.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/snapshots_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/snapshots_server.go new file mode 100644 index 00000000000..b9bba8f4ed0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/snapshots_server.go @@ -0,0 +1,461 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// SnapshotsServer is a fake server for instances of the armcompute.SnapshotsClient type. +type SnapshotsServer struct { + // BeginCreateOrUpdate is the fake for method SnapshotsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, snapshotName string, snapshot armcompute.Snapshot, options *armcompute.SnapshotsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armcompute.SnapshotsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method SnapshotsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, snapshotName string, options *armcompute.SnapshotsClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.SnapshotsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method SnapshotsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, snapshotName string, options *armcompute.SnapshotsClientGetOptions) (resp azfake.Responder[armcompute.SnapshotsClientGetResponse], errResp azfake.ErrorResponder) + + // BeginGrantAccess is the fake for method SnapshotsClient.BeginGrantAccess + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGrantAccess func(ctx context.Context, resourceGroupName string, snapshotName string, grantAccessData armcompute.GrantAccessData, options *armcompute.SnapshotsClientBeginGrantAccessOptions) (resp azfake.PollerResponder[armcompute.SnapshotsClientGrantAccessResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method SnapshotsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armcompute.SnapshotsClientListOptions) (resp azfake.PagerResponder[armcompute.SnapshotsClientListResponse]) + + // NewListByResourceGroupPager is the fake for method SnapshotsClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armcompute.SnapshotsClientListByResourceGroupOptions) (resp azfake.PagerResponder[armcompute.SnapshotsClientListByResourceGroupResponse]) + + // BeginRevokeAccess is the fake for method SnapshotsClient.BeginRevokeAccess + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginRevokeAccess func(ctx context.Context, resourceGroupName string, snapshotName string, options *armcompute.SnapshotsClientBeginRevokeAccessOptions) (resp azfake.PollerResponder[armcompute.SnapshotsClientRevokeAccessResponse], errResp azfake.ErrorResponder) + + // BeginUpdate is the fake for method SnapshotsClient.BeginUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginUpdate func(ctx context.Context, resourceGroupName string, snapshotName string, snapshot armcompute.SnapshotUpdate, options 
*armcompute.SnapshotsClientBeginUpdateOptions) (resp azfake.PollerResponder[armcompute.SnapshotsClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewSnapshotsServerTransport creates a new instance of SnapshotsServerTransport with the provided implementation. +// The returned SnapshotsServerTransport instance is connected to an instance of armcompute.SnapshotsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewSnapshotsServerTransport(srv *SnapshotsServer) *SnapshotsServerTransport { + return &SnapshotsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armcompute.SnapshotsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armcompute.SnapshotsClientDeleteResponse]](), + beginGrantAccess: newTracker[azfake.PollerResponder[armcompute.SnapshotsClientGrantAccessResponse]](), + newListPager: newTracker[azfake.PagerResponder[armcompute.SnapshotsClientListResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armcompute.SnapshotsClientListByResourceGroupResponse]](), + beginRevokeAccess: newTracker[azfake.PollerResponder[armcompute.SnapshotsClientRevokeAccessResponse]](), + beginUpdate: newTracker[azfake.PollerResponder[armcompute.SnapshotsClientUpdateResponse]](), + } +} + +// SnapshotsServerTransport connects instances of armcompute.SnapshotsClient to instances of SnapshotsServer. +// Don't use this type directly, use NewSnapshotsServerTransport instead. +type SnapshotsServerTransport struct { + srv *SnapshotsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armcompute.SnapshotsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armcompute.SnapshotsClientDeleteResponse]] + beginGrantAccess *tracker[azfake.PollerResponder[armcompute.SnapshotsClientGrantAccessResponse]] + newListPager *tracker[azfake.PagerResponder[armcompute.SnapshotsClientListResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armcompute.SnapshotsClientListByResourceGroupResponse]] + beginRevokeAccess *tracker[azfake.PollerResponder[armcompute.SnapshotsClientRevokeAccessResponse]] + beginUpdate *tracker[azfake.PollerResponder[armcompute.SnapshotsClientUpdateResponse]] +} + +// Do implements the policy.Transporter interface for SnapshotsServerTransport. 
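The Begin* fields model long-running operations, so their fakes return an azfake.PollerResponder. A sketch under the same assumptions: queue zero or more non-terminal polling responses, then the terminal payload, and drive the operation from the client with PollUntilDone. Resource names below are placeholders.

package armcomputefake_test

import (
	"context"
	"net/http"
	"testing"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake"
)

func TestSnapshotsCreateOrUpdate_Fake(t *testing.T) {
	srv := fake.SnapshotsServer{
		BeginCreateOrUpdate: func(ctx context.Context, resourceGroupName, snapshotName string, snapshot armcompute.Snapshot, options *armcompute.SnapshotsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armcompute.SnapshotsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) {
			// One in-progress poll, then the terminal 200 echoing the request body.
			resp.AddNonTerminalResponse(http.StatusAccepted, nil)
			resp.SetTerminalResponse(http.StatusOK, armcompute.SnapshotsClientCreateOrUpdateResponse{Snapshot: snapshot}, nil)
			return
		},
	}

	client, err := armcompute.NewSnapshotsClient("00000000-0000-0000-0000-000000000000", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transport: fake.NewSnapshotsServerTransport(&srv)},
	})
	if err != nil {
		t.Fatal(err)
	}

	poller, err := client.BeginCreateOrUpdate(context.Background(), "test-rg", "test-snapshot",
		armcompute.Snapshot{Location: to.Ptr("westus")}, nil)
	if err != nil {
		t.Fatal(err)
	}
	res, err := poller.PollUntilDone(context.Background(), nil)
	if err != nil {
		t.Fatal(err)
	}
	if res.Location == nil || *res.Location != "westus" {
		t.Fatalf("unexpected snapshot location: %v", res.Location)
	}
}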
+func (s *SnapshotsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "SnapshotsClient.BeginCreateOrUpdate": + resp, err = s.dispatchBeginCreateOrUpdate(req) + case "SnapshotsClient.BeginDelete": + resp, err = s.dispatchBeginDelete(req) + case "SnapshotsClient.Get": + resp, err = s.dispatchGet(req) + case "SnapshotsClient.BeginGrantAccess": + resp, err = s.dispatchBeginGrantAccess(req) + case "SnapshotsClient.NewListPager": + resp, err = s.dispatchNewListPager(req) + case "SnapshotsClient.NewListByResourceGroupPager": + resp, err = s.dispatchNewListByResourceGroupPager(req) + case "SnapshotsClient.BeginRevokeAccess": + resp, err = s.dispatchBeginRevokeAccess(req) + case "SnapshotsClient.BeginUpdate": + resp, err = s.dispatchBeginUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (s *SnapshotsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if s.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := s.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/snapshots/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.Snapshot](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + snapshotNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("snapshotName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, snapshotNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + s.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + s.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + s.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (s *SnapshotsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if s.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := s.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/snapshots/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + snapshotNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("snapshotName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.BeginDelete(req.Context(), resourceGroupNameParam, snapshotNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + s.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + s.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + s.beginDelete.remove(req) + } + + return resp, nil +} + +func (s *SnapshotsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if s.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/snapshots/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + snapshotNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("snapshotName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.Get(req.Context(), resourceGroupNameParam, snapshotNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Snapshot, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *SnapshotsServerTransport) dispatchBeginGrantAccess(req *http.Request) (*http.Response, error) { + if s.srv.BeginGrantAccess == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGrantAccess not implemented")} + } + beginGrantAccess := s.beginGrantAccess.get(req) + if beginGrantAccess == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/snapshots/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/beginGetAccess` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.GrantAccessData](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + snapshotNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("snapshotName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.BeginGrantAccess(req.Context(), resourceGroupNameParam, snapshotNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGrantAccess = &respr + s.beginGrantAccess.add(req, beginGrantAccess) + } + + resp, err := server.PollerResponderNext(beginGrantAccess, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + s.beginGrantAccess.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGrantAccess) { + s.beginGrantAccess.remove(req) + } + + return resp, nil +} + +func (s *SnapshotsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if s.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := s.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/snapshots` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := s.srv.NewListPager(nil) + newListPager = &resp + s.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armcompute.SnapshotsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + s.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + s.newListPager.remove(req) + } + return resp, nil +} + +func (s *SnapshotsServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if s.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := s.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/snapshots` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := s.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + s.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armcompute.SnapshotsClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + s.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + s.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (s *SnapshotsServerTransport) dispatchBeginRevokeAccess(req *http.Request) (*http.Response, error) { + if s.srv.BeginRevokeAccess == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginRevokeAccess not implemented")} + } + beginRevokeAccess := s.beginRevokeAccess.get(req) + if beginRevokeAccess == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/snapshots/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/endGetAccess` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + snapshotNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("snapshotName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.BeginRevokeAccess(req.Context(), resourceGroupNameParam, snapshotNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginRevokeAccess = &respr + s.beginRevokeAccess.add(req, beginRevokeAccess) + } + + resp, err := server.PollerResponderNext(beginRevokeAccess, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + s.beginRevokeAccess.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginRevokeAccess) { + s.beginRevokeAccess.remove(req) + } + + return resp, nil +} + +func (s *SnapshotsServerTransport) dispatchBeginUpdate(req *http.Request) (*http.Response, error) { + if s.srv.BeginUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdate not implemented")} + } + beginUpdate := s.beginUpdate.get(req) + if beginUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/snapshots/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.SnapshotUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + snapshotNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("snapshotName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.BeginUpdate(req.Context(), resourceGroupNameParam, snapshotNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdate = &respr + s.beginUpdate.add(req, beginUpdate) + } + + resp, err := server.PollerResponderNext(beginUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + s.beginUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdate) { + s.beginUpdate.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/sshpublickeys_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/sshpublickeys_server.go new file mode 100644 index 00000000000..ecd7fdff84f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/sshpublickeys_server.go @@ -0,0 +1,364 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "reflect" + "regexp" +) + +// SSHPublicKeysServer is a fake server for instances of the armcompute.SSHPublicKeysClient type. 
+type SSHPublicKeysServer struct { + // Create is the fake for method SSHPublicKeysClient.Create + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + Create func(ctx context.Context, resourceGroupName string, sshPublicKeyName string, parameters armcompute.SSHPublicKeyResource, options *armcompute.SSHPublicKeysClientCreateOptions) (resp azfake.Responder[armcompute.SSHPublicKeysClientCreateResponse], errResp azfake.ErrorResponder) + + // Delete is the fake for method SSHPublicKeysClient.Delete + // HTTP status codes to indicate success: http.StatusOK, http.StatusNoContent + Delete func(ctx context.Context, resourceGroupName string, sshPublicKeyName string, options *armcompute.SSHPublicKeysClientDeleteOptions) (resp azfake.Responder[armcompute.SSHPublicKeysClientDeleteResponse], errResp azfake.ErrorResponder) + + // GenerateKeyPair is the fake for method SSHPublicKeysClient.GenerateKeyPair + // HTTP status codes to indicate success: http.StatusOK + GenerateKeyPair func(ctx context.Context, resourceGroupName string, sshPublicKeyName string, options *armcompute.SSHPublicKeysClientGenerateKeyPairOptions) (resp azfake.Responder[armcompute.SSHPublicKeysClientGenerateKeyPairResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method SSHPublicKeysClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, sshPublicKeyName string, options *armcompute.SSHPublicKeysClientGetOptions) (resp azfake.Responder[armcompute.SSHPublicKeysClientGetResponse], errResp azfake.ErrorResponder) + + // NewListByResourceGroupPager is the fake for method SSHPublicKeysClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armcompute.SSHPublicKeysClientListByResourceGroupOptions) (resp azfake.PagerResponder[armcompute.SSHPublicKeysClientListByResourceGroupResponse]) + + // NewListBySubscriptionPager is the fake for method SSHPublicKeysClient.NewListBySubscriptionPager + // HTTP status codes to indicate success: http.StatusOK + NewListBySubscriptionPager func(options *armcompute.SSHPublicKeysClientListBySubscriptionOptions) (resp azfake.PagerResponder[armcompute.SSHPublicKeysClientListBySubscriptionResponse]) + + // Update is the fake for method SSHPublicKeysClient.Update + // HTTP status codes to indicate success: http.StatusOK + Update func(ctx context.Context, resourceGroupName string, sshPublicKeyName string, parameters armcompute.SSHPublicKeyUpdateResource, options *armcompute.SSHPublicKeysClientUpdateOptions) (resp azfake.Responder[armcompute.SSHPublicKeysClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewSSHPublicKeysServerTransport creates a new instance of SSHPublicKeysServerTransport with the provided implementation. +// The returned SSHPublicKeysServerTransport instance is connected to an instance of armcompute.SSHPublicKeysClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
+func NewSSHPublicKeysServerTransport(srv *SSHPublicKeysServer) *SSHPublicKeysServerTransport { + return &SSHPublicKeysServerTransport{ + srv: srv, + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armcompute.SSHPublicKeysClientListByResourceGroupResponse]](), + newListBySubscriptionPager: newTracker[azfake.PagerResponder[armcompute.SSHPublicKeysClientListBySubscriptionResponse]](), + } +} + +// SSHPublicKeysServerTransport connects instances of armcompute.SSHPublicKeysClient to instances of SSHPublicKeysServer. +// Don't use this type directly, use NewSSHPublicKeysServerTransport instead. +type SSHPublicKeysServerTransport struct { + srv *SSHPublicKeysServer + newListByResourceGroupPager *tracker[azfake.PagerResponder[armcompute.SSHPublicKeysClientListByResourceGroupResponse]] + newListBySubscriptionPager *tracker[azfake.PagerResponder[armcompute.SSHPublicKeysClientListBySubscriptionResponse]] +} + +// Do implements the policy.Transporter interface for SSHPublicKeysServerTransport. +func (s *SSHPublicKeysServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "SSHPublicKeysClient.Create": + resp, err = s.dispatchCreate(req) + case "SSHPublicKeysClient.Delete": + resp, err = s.dispatchDelete(req) + case "SSHPublicKeysClient.GenerateKeyPair": + resp, err = s.dispatchGenerateKeyPair(req) + case "SSHPublicKeysClient.Get": + resp, err = s.dispatchGet(req) + case "SSHPublicKeysClient.NewListByResourceGroupPager": + resp, err = s.dispatchNewListByResourceGroupPager(req) + case "SSHPublicKeysClient.NewListBySubscriptionPager": + resp, err = s.dispatchNewListBySubscriptionPager(req) + case "SSHPublicKeysClient.Update": + resp, err = s.dispatchUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (s *SSHPublicKeysServerTransport) dispatchCreate(req *http.Request) (*http.Response, error) { + if s.srv.Create == nil { + return nil, &nonRetriableError{errors.New("fake for method Create not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/sshPublicKeys/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.SSHPublicKeyResource](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + sshPublicKeyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("sshPublicKeyName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.Create(req.Context(), resourceGroupNameParam, sshPublicKeyNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusCreated}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).SSHPublicKeyResource, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *SSHPublicKeysServerTransport) dispatchDelete(req *http.Request) (*http.Response, error) { + if s.srv.Delete == nil { + return nil, &nonRetriableError{errors.New("fake for method Delete not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/sshPublicKeys/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + sshPublicKeyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("sshPublicKeyName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.Delete(req.Context(), resourceGroupNameParam, sshPublicKeyNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusNoContent}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusNoContent", respContent.HTTPStatus)} + } + resp, err := server.NewResponse(respContent, req, nil) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *SSHPublicKeysServerTransport) dispatchGenerateKeyPair(req *http.Request) (*http.Response, error) { + if s.srv.GenerateKeyPair == nil { + return nil, &nonRetriableError{errors.New("fake for method GenerateKeyPair not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/sshPublicKeys/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/generateKeyPair` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.SSHGenerateKeyPairInputParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + sshPublicKeyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("sshPublicKeyName")]) + if err != nil { + return nil, err + } + var options *armcompute.SSHPublicKeysClientGenerateKeyPairOptions + if !reflect.ValueOf(body).IsZero() { + options = &armcompute.SSHPublicKeysClientGenerateKeyPairOptions{ + Parameters: &body, + } + } + respr, errRespr := s.srv.GenerateKeyPair(req.Context(), resourceGroupNameParam, sshPublicKeyNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).SSHPublicKeyGenerateKeyPairResult, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *SSHPublicKeysServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if s.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/sshPublicKeys/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + sshPublicKeyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("sshPublicKeyName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.Get(req.Context(), resourceGroupNameParam, sshPublicKeyNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).SSHPublicKeyResource, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *SSHPublicKeysServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if s.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := s.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/sshPublicKeys` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := s.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + s.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armcompute.SSHPublicKeysClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + s.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + s.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (s *SSHPublicKeysServerTransport) dispatchNewListBySubscriptionPager(req *http.Request) (*http.Response, error) { + if s.srv.NewListBySubscriptionPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListBySubscriptionPager not implemented")} + } + newListBySubscriptionPager := s.newListBySubscriptionPager.get(req) + if newListBySubscriptionPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/sshPublicKeys` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := s.srv.NewListBySubscriptionPager(nil) + newListBySubscriptionPager = &resp + s.newListBySubscriptionPager.add(req, newListBySubscriptionPager) + server.PagerResponderInjectNextLinks(newListBySubscriptionPager, req, func(page *armcompute.SSHPublicKeysClientListBySubscriptionResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListBySubscriptionPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + s.newListBySubscriptionPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListBySubscriptionPager) { + s.newListBySubscriptionPager.remove(req) + } + return resp, nil +} + +func (s *SSHPublicKeysServerTransport) dispatchUpdate(req *http.Request) (*http.Response, error) { + if s.srv.Update == nil { + return nil, &nonRetriableError{errors.New("fake for method Update not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/sshPublicKeys/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.SSHPublicKeyUpdateResource](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + sshPublicKeyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("sshPublicKeyName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.Update(req.Context(), resourceGroupNameParam, sshPublicKeyNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).SSHPublicKeyResource, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/time_rfc3339.go new file mode 100644 index 00000000000..81f308b0d34 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/time_rfc3339.go @@ -0,0 +1,110 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "encoding/json" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "reflect" + "regexp" + "strings" + "time" +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +var tzOffsetRegex = regexp.MustCompile(`(?:Z|z|\+|-)(?:\d+:\d+)*"*$`) + +const ( + utcDateTime = "2006-01-02T15:04:05.999999999" + utcDateTimeJSON = `"` + utcDateTime + `"` + utcDateTimeNoT = "2006-01-02 15:04:05.999999999" + utcDateTimeJSONNoT = `"` + utcDateTimeNoT + `"` + dateTimeNoT = `2006-01-02 15:04:05.999999999Z07:00` + dateTimeJSON = `"` + time.RFC3339Nano + `"` + dateTimeJSONNoT = `"` + dateTimeNoT + `"` +) + +type dateTimeRFC3339 time.Time + +func (t dateTimeRFC3339) MarshalJSON() ([]byte, error) { + tt := time.Time(t) + return tt.MarshalJSON() +} + +func (t dateTimeRFC3339) MarshalText() ([]byte, error) { + tt := time.Time(t) + return tt.MarshalText() +} + +func (t *dateTimeRFC3339) UnmarshalJSON(data []byte) error { + tzOffset := tzOffsetRegex.Match(data) + hasT := strings.Contains(string(data), "T") || strings.Contains(string(data), "t") + var layout string + if tzOffset && hasT { + layout = dateTimeJSON + } else if tzOffset { + layout = dateTimeJSONNoT + } else if hasT { + layout = utcDateTimeJSON + } else { + layout = utcDateTimeJSONNoT + } + return t.Parse(layout, string(data)) +} + +func (t *dateTimeRFC3339) UnmarshalText(data []byte) error { + tzOffset := tzOffsetRegex.Match(data) + hasT := strings.Contains(string(data), "T") || strings.Contains(string(data), "t") + var layout string + if tzOffset && hasT { + layout = time.RFC3339Nano + } else if tzOffset { + layout = dateTimeNoT + } else if hasT { + layout = utcDateTime + } else { + layout = utcDateTimeNoT + } + return t.Parse(layout, string(data)) +} + +func (t *dateTimeRFC3339) Parse(layout, value string) error { + p, err := time.Parse(layout, strings.ToUpper(value)) + *t = dateTimeRFC3339(p) + return err +} + +func (t dateTimeRFC3339) String() string { + return time.Time(t).Format(time.RFC3339Nano) +} + +func populateDateTimeRFC3339(m map[string]any, k string, t *time.Time) { + if t == nil { + return + } else if azcore.IsNullValue(t) { + m[k] = nil + return + } else if reflect.ValueOf(t).IsNil() { + return + } + m[k] = (*dateTimeRFC3339)(t) +} + +func unpopulateDateTimeRFC3339(data json.RawMessage, fn string, t **time.Time) error { + if data == nil || string(data) == "null" { + return nil + } + var aux dateTimeRFC3339 + if err := json.Unmarshal(data, &aux); err != nil { + return 
fmt.Errorf("struct field %s: %v", fn, err) + } + *t = (*time.Time)(&aux) + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/usage_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/usage_server.go new file mode 100644 index 00000000000..a8115a02f8f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/usage_server.go @@ -0,0 +1,108 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// UsageServer is a fake server for instances of the armcompute.UsageClient type. +type UsageServer struct { + // NewListPager is the fake for method UsageClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(location string, options *armcompute.UsageClientListOptions) (resp azfake.PagerResponder[armcompute.UsageClientListResponse]) +} + +// NewUsageServerTransport creates a new instance of UsageServerTransport with the provided implementation. +// The returned UsageServerTransport instance is connected to an instance of armcompute.UsageClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewUsageServerTransport(srv *UsageServer) *UsageServerTransport { + return &UsageServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armcompute.UsageClientListResponse]](), + } +} + +// UsageServerTransport connects instances of armcompute.UsageClient to instances of UsageServer. +// Don't use this type directly, use NewUsageServerTransport instead. +type UsageServerTransport struct { + srv *UsageServer + newListPager *tracker[azfake.PagerResponder[armcompute.UsageClientListResponse]] +} + +// Do implements the policy.Transporter interface for UsageServerTransport. 
+func (u *UsageServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "UsageClient.NewListPager": + resp, err = u.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (u *UsageServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if u.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := u.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/usages` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + resp := u.srv.NewListPager(locationParam, nil) + newListPager = &resp + u.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armcompute.UsageClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + u.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + u.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachineextensionimages_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachineextensionimages_server.go new file mode 100644 index 00000000000..34fa4ceae6b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachineextensionimages_server.go @@ -0,0 +1,224 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" + "strconv" +) + +// VirtualMachineExtensionImagesServer is a fake server for instances of the armcompute.VirtualMachineExtensionImagesClient type. 
+type VirtualMachineExtensionImagesServer struct { + // Get is the fake for method VirtualMachineExtensionImagesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, location string, publisherName string, typeParam string, version string, options *armcompute.VirtualMachineExtensionImagesClientGetOptions) (resp azfake.Responder[armcompute.VirtualMachineExtensionImagesClientGetResponse], errResp azfake.ErrorResponder) + + // ListTypes is the fake for method VirtualMachineExtensionImagesClient.ListTypes + // HTTP status codes to indicate success: http.StatusOK + ListTypes func(ctx context.Context, location string, publisherName string, options *armcompute.VirtualMachineExtensionImagesClientListTypesOptions) (resp azfake.Responder[armcompute.VirtualMachineExtensionImagesClientListTypesResponse], errResp azfake.ErrorResponder) + + // ListVersions is the fake for method VirtualMachineExtensionImagesClient.ListVersions + // HTTP status codes to indicate success: http.StatusOK + ListVersions func(ctx context.Context, location string, publisherName string, typeParam string, options *armcompute.VirtualMachineExtensionImagesClientListVersionsOptions) (resp azfake.Responder[armcompute.VirtualMachineExtensionImagesClientListVersionsResponse], errResp azfake.ErrorResponder) +} + +// NewVirtualMachineExtensionImagesServerTransport creates a new instance of VirtualMachineExtensionImagesServerTransport with the provided implementation. +// The returned VirtualMachineExtensionImagesServerTransport instance is connected to an instance of armcompute.VirtualMachineExtensionImagesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVirtualMachineExtensionImagesServerTransport(srv *VirtualMachineExtensionImagesServer) *VirtualMachineExtensionImagesServerTransport { + return &VirtualMachineExtensionImagesServerTransport{srv: srv} +} + +// VirtualMachineExtensionImagesServerTransport connects instances of armcompute.VirtualMachineExtensionImagesClient to instances of VirtualMachineExtensionImagesServer. +// Don't use this type directly, use NewVirtualMachineExtensionImagesServerTransport instead. +type VirtualMachineExtensionImagesServerTransport struct { + srv *VirtualMachineExtensionImagesServer +} + +// Do implements the policy.Transporter interface for VirtualMachineExtensionImagesServerTransport. 
+func (v *VirtualMachineExtensionImagesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualMachineExtensionImagesClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualMachineExtensionImagesClient.ListTypes": + resp, err = v.dispatchListTypes(req) + case "VirtualMachineExtensionImagesClient.ListVersions": + resp, err = v.dispatchListVersions(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualMachineExtensionImagesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/publishers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/artifacttypes/vmextension/types/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/versions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + publisherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publisherName")]) + if err != nil { + return nil, err + } + typeParamParam, err := url.PathUnescape(matches[regex.SubexpIndex("type")]) + if err != nil { + return nil, err + } + versionParam, err := url.PathUnescape(matches[regex.SubexpIndex("version")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.Get(req.Context(), locationParam, publisherNameParam, typeParamParam, versionParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineExtensionImage, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineExtensionImagesServerTransport) dispatchListTypes(req *http.Request) (*http.Response, error) { + if v.srv.ListTypes == nil { + return nil, &nonRetriableError{errors.New("fake for method ListTypes not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/publishers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/artifacttypes/vmextension/types` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + publisherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publisherName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.ListTypes(req.Context(), locationParam, publisherNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineExtensionImageArray, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineExtensionImagesServerTransport) dispatchListVersions(req *http.Request) (*http.Response, error) { + if v.srv.ListVersions == nil { + return nil, &nonRetriableError{errors.New("fake for method ListVersions not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/publishers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/artifacttypes/vmextension/types/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/versions` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + publisherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publisherName")]) + if err != nil { + return nil, err + } + typeParamParam, err := url.PathUnescape(matches[regex.SubexpIndex("type")]) + if err != nil { + return nil, err + } + filterUnescaped, err := url.QueryUnescape(qp.Get("$filter")) + if err != nil { + return nil, err + } + filterParam := getOptional(filterUnescaped) + topUnescaped, err := url.QueryUnescape(qp.Get("$top")) + if err != nil { + return nil, err + } + topParam, err := parseOptional(topUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + orderbyUnescaped, err := url.QueryUnescape(qp.Get("$orderby")) + if err != nil { + return nil, err + } + orderbyParam := 
getOptional(orderbyUnescaped) + var options *armcompute.VirtualMachineExtensionImagesClientListVersionsOptions + if filterParam != nil || topParam != nil || orderbyParam != nil { + options = &armcompute.VirtualMachineExtensionImagesClientListVersionsOptions{ + Filter: filterParam, + Top: topParam, + Orderby: orderbyParam, + } + } + respr, errRespr := v.srv.ListVersions(req.Context(), locationParam, publisherNameParam, typeParamParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineExtensionImageArray, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachineextensions_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachineextensions_server.go new file mode 100644 index 00000000000..1e1c4dd7f0b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachineextensions_server.go @@ -0,0 +1,345 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// VirtualMachineExtensionsServer is a fake server for instances of the armcompute.VirtualMachineExtensionsClient type. 
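// A short sketch of the query-parameter handling in dispatchListVersions above: $filter, $top and
// $orderby are unescaped and folded into a VirtualMachineExtensionImagesClientListVersionsOptions
// value, which stays nil when the caller sends none of them. The subscription ID, publisher and
// extension type below are placeholder values.
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake"
)

func main() {
	srv := fake.VirtualMachineExtensionImagesServer{
		ListVersions: func(ctx context.Context, location, publisherName, typeParam string, options *armcompute.VirtualMachineExtensionImagesClientListVersionsOptions) (resp azfake.Responder[armcompute.VirtualMachineExtensionImagesClientListVersionsResponse], errResp azfake.ErrorResponder) {
			// options is non-nil here because the caller below sends $top; Filter and Orderby remain nil.
			resp.SetResponse(http.StatusOK, armcompute.VirtualMachineExtensionImagesClientListVersionsResponse{}, nil)
			return
		},
	}
	client, err := armcompute.NewVirtualMachineExtensionImagesClient("subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transporter: fake.NewVirtualMachineExtensionImagesServerTransport(&srv)},
	})
	if err != nil {
		log.Fatal(err)
	}
	// $top=5 reaches the fake as options.Top.
	_, err = client.ListVersions(context.Background(), "eastus", "Microsoft.Azure.Extensions", "CustomScript",
		&armcompute.VirtualMachineExtensionImagesClientListVersionsOptions{Top: to.Ptr(int32(5))})
	if err != nil {
		log.Fatal(err)
	}
}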
+type VirtualMachineExtensionsServer struct { + // BeginCreateOrUpdate is the fake for method VirtualMachineExtensionsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, vmName string, vmExtensionName string, extensionParameters armcompute.VirtualMachineExtension, options *armcompute.VirtualMachineExtensionsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineExtensionsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VirtualMachineExtensionsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, vmName string, vmExtensionName string, options *armcompute.VirtualMachineExtensionsClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineExtensionsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VirtualMachineExtensionsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, vmName string, vmExtensionName string, options *armcompute.VirtualMachineExtensionsClientGetOptions) (resp azfake.Responder[armcompute.VirtualMachineExtensionsClientGetResponse], errResp azfake.ErrorResponder) + + // List is the fake for method VirtualMachineExtensionsClient.List + // HTTP status codes to indicate success: http.StatusOK + List func(ctx context.Context, resourceGroupName string, vmName string, options *armcompute.VirtualMachineExtensionsClientListOptions) (resp azfake.Responder[armcompute.VirtualMachineExtensionsClientListResponse], errResp azfake.ErrorResponder) + + // BeginUpdate is the fake for method VirtualMachineExtensionsClient.BeginUpdate + // HTTP status codes to indicate success: http.StatusOK + BeginUpdate func(ctx context.Context, resourceGroupName string, vmName string, vmExtensionName string, extensionParameters armcompute.VirtualMachineExtensionUpdate, options *armcompute.VirtualMachineExtensionsClientBeginUpdateOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineExtensionsClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewVirtualMachineExtensionsServerTransport creates a new instance of VirtualMachineExtensionsServerTransport with the provided implementation. +// The returned VirtualMachineExtensionsServerTransport instance is connected to an instance of armcompute.VirtualMachineExtensionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVirtualMachineExtensionsServerTransport(srv *VirtualMachineExtensionsServer) *VirtualMachineExtensionsServerTransport { + return &VirtualMachineExtensionsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armcompute.VirtualMachineExtensionsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armcompute.VirtualMachineExtensionsClientDeleteResponse]](), + beginUpdate: newTracker[azfake.PollerResponder[armcompute.VirtualMachineExtensionsClientUpdateResponse]](), + } +} + +// VirtualMachineExtensionsServerTransport connects instances of armcompute.VirtualMachineExtensionsClient to instances of VirtualMachineExtensionsServer. +// Don't use this type directly, use NewVirtualMachineExtensionsServerTransport instead. 
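// A hedged sketch of driving the long-running-operation fake declared above: BeginCreateOrUpdate
// returns an azfake.PollerResponder, which (assuming the AddNonTerminalResponse and
// SetTerminalResponse helpers from sdk/azcore/fake) replays one non-terminal 201 before the
// terminal 200, and PollUntilDone on the client side consumes both through the transport.
// Resource names and the subscription ID are placeholders.
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake"
)

func main() {
	srv := fake.VirtualMachineExtensionsServer{
		BeginCreateOrUpdate: func(ctx context.Context, resourceGroupName, vmName, vmExtensionName string, extensionParameters armcompute.VirtualMachineExtension, options *armcompute.VirtualMachineExtensionsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineExtensionsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) {
			// One intermediate poll result, then the terminal payload echoing the request body.
			resp.AddNonTerminalResponse(http.StatusCreated, nil)
			resp.SetTerminalResponse(http.StatusOK, armcompute.VirtualMachineExtensionsClientCreateOrUpdateResponse{
				VirtualMachineExtension: extensionParameters,
			}, nil)
			return
		},
	}
	client, err := armcompute.NewVirtualMachineExtensionsClient("subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transporter: fake.NewVirtualMachineExtensionsServerTransport(&srv)},
	})
	if err != nil {
		log.Fatal(err)
	}
	poller, err := client.BeginCreateOrUpdate(context.Background(), "rg", "vm", "ext", armcompute.VirtualMachineExtension{}, nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err = poller.PollUntilDone(context.Background(), nil); err != nil {
		log.Fatal(err)
	}
}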
+type VirtualMachineExtensionsServerTransport struct { + srv *VirtualMachineExtensionsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armcompute.VirtualMachineExtensionsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armcompute.VirtualMachineExtensionsClientDeleteResponse]] + beginUpdate *tracker[azfake.PollerResponder[armcompute.VirtualMachineExtensionsClientUpdateResponse]] +} + +// Do implements the policy.Transporter interface for VirtualMachineExtensionsServerTransport. +func (v *VirtualMachineExtensionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualMachineExtensionsClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VirtualMachineExtensionsClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VirtualMachineExtensionsClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualMachineExtensionsClient.List": + resp, err = v.dispatchList(req) + case "VirtualMachineExtensionsClient.BeginUpdate": + resp, err = v.dispatchBeginUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualMachineExtensionsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/extensions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineExtension](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + vmExtensionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmExtensionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, vmNameParam, vmExtensionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineExtensionsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/extensions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + vmExtensionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmExtensionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, vmNameParam, vmExtensionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineExtensionsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/extensions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + vmExtensionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmExtensionName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armcompute.VirtualMachineExtensionsClientGetOptions + if expandParam != nil { + options = &armcompute.VirtualMachineExtensionsClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, vmNameParam, vmExtensionNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineExtension, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineExtensionsServerTransport) dispatchList(req *http.Request) (*http.Response, error) { + if v.srv.List == nil { + return nil, &nonRetriableError{errors.New("fake for method List not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/extensions` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armcompute.VirtualMachineExtensionsClientListOptions + if expandParam != nil { + options = &armcompute.VirtualMachineExtensionsClientListOptions{ + Expand: expandParam, + } + } + respr, errRespr := v.srv.List(req.Context(), resourceGroupNameParam, vmNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineExtensionsListResult, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineExtensionsServerTransport) dispatchBeginUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdate not implemented")} + } + beginUpdate := v.beginUpdate.get(req) + if beginUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/extensions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineExtensionUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + vmExtensionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmExtensionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginUpdate(req.Context(), resourceGroupNameParam, vmNameParam, vmExtensionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdate = &respr + v.beginUpdate.add(req, beginUpdate) + } + + resp, err := server.PollerResponderNext(beginUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.beginUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdate) { + v.beginUpdate.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachineimages_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachineimages_server.go new file mode 100644 index 00000000000..ed87ad17643 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachineimages_server.go @@ -0,0 +1,349 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" + "strconv" +) + +// VirtualMachineImagesServer is a fake server for instances of the armcompute.VirtualMachineImagesClient type. 
+type VirtualMachineImagesServer struct { + // Get is the fake for method VirtualMachineImagesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, location string, publisherName string, offer string, skus string, version string, options *armcompute.VirtualMachineImagesClientGetOptions) (resp azfake.Responder[armcompute.VirtualMachineImagesClientGetResponse], errResp azfake.ErrorResponder) + + // List is the fake for method VirtualMachineImagesClient.List + // HTTP status codes to indicate success: http.StatusOK + List func(ctx context.Context, location string, publisherName string, offer string, skus string, options *armcompute.VirtualMachineImagesClientListOptions) (resp azfake.Responder[armcompute.VirtualMachineImagesClientListResponse], errResp azfake.ErrorResponder) + + // ListByEdgeZone is the fake for method VirtualMachineImagesClient.ListByEdgeZone + // HTTP status codes to indicate success: http.StatusOK + ListByEdgeZone func(ctx context.Context, location string, edgeZone string, options *armcompute.VirtualMachineImagesClientListByEdgeZoneOptions) (resp azfake.Responder[armcompute.VirtualMachineImagesClientListByEdgeZoneResponse], errResp azfake.ErrorResponder) + + // ListOffers is the fake for method VirtualMachineImagesClient.ListOffers + // HTTP status codes to indicate success: http.StatusOK + ListOffers func(ctx context.Context, location string, publisherName string, options *armcompute.VirtualMachineImagesClientListOffersOptions) (resp azfake.Responder[armcompute.VirtualMachineImagesClientListOffersResponse], errResp azfake.ErrorResponder) + + // ListPublishers is the fake for method VirtualMachineImagesClient.ListPublishers + // HTTP status codes to indicate success: http.StatusOK + ListPublishers func(ctx context.Context, location string, options *armcompute.VirtualMachineImagesClientListPublishersOptions) (resp azfake.Responder[armcompute.VirtualMachineImagesClientListPublishersResponse], errResp azfake.ErrorResponder) + + // ListSKUs is the fake for method VirtualMachineImagesClient.ListSKUs + // HTTP status codes to indicate success: http.StatusOK + ListSKUs func(ctx context.Context, location string, publisherName string, offer string, options *armcompute.VirtualMachineImagesClientListSKUsOptions) (resp azfake.Responder[armcompute.VirtualMachineImagesClientListSKUsResponse], errResp azfake.ErrorResponder) +} + +// NewVirtualMachineImagesServerTransport creates a new instance of VirtualMachineImagesServerTransport with the provided implementation. +// The returned VirtualMachineImagesServerTransport instance is connected to an instance of armcompute.VirtualMachineImagesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVirtualMachineImagesServerTransport(srv *VirtualMachineImagesServer) *VirtualMachineImagesServerTransport { + return &VirtualMachineImagesServerTransport{srv: srv} +} + +// VirtualMachineImagesServerTransport connects instances of armcompute.VirtualMachineImagesClient to instances of VirtualMachineImagesServer. +// Don't use this type directly, use NewVirtualMachineImagesServerTransport instead. +type VirtualMachineImagesServerTransport struct { + srv *VirtualMachineImagesServer +} + +// Do implements the policy.Transporter interface for VirtualMachineImagesServerTransport. 
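// A minimal sketch of the plain request/response fakes declared above: Responder.SetResponse
// supplies the success payload for Get, while ErrorResponder.SetResponseError makes any other
// version surface as an *azcore.ResponseError on the client side. All identifiers below
// (subscription ID, publisher, offer, SKU, versions) are placeholder values.
package main

import (
	"context"
	"errors"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake"
)

func main() {
	srv := fake.VirtualMachineImagesServer{
		Get: func(ctx context.Context, location, publisherName, offer, skus, version string, options *armcompute.VirtualMachineImagesClientGetOptions) (resp azfake.Responder[armcompute.VirtualMachineImagesClientGetResponse], errResp azfake.ErrorResponder) {
			if version == "1.0.0" {
				resp.SetResponse(http.StatusOK, armcompute.VirtualMachineImagesClientGetResponse{}, nil)
			} else {
				errResp.SetResponseError(http.StatusNotFound, "ImageVersionNotFound")
			}
			return
		},
	}
	client, err := armcompute.NewVirtualMachineImagesClient("subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transporter: fake.NewVirtualMachineImagesServerTransport(&srv)},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Known version: served from Responder.SetResponse.
	if _, err = client.Get(context.Background(), "eastus", "Canonical", "UbuntuServer", "18.04-LTS", "1.0.0", nil); err != nil {
		log.Fatal(err)
	}
	// Unknown version: the ErrorResponder path yields a 404 ResponseError.
	_, err = client.Get(context.Background(), "eastus", "Canonical", "UbuntuServer", "18.04-LTS", "9.9.9", nil)
	var respErr *azcore.ResponseError
	if !errors.As(err, &respErr) || respErr.StatusCode != http.StatusNotFound {
		log.Fatalf("expected a 404 ResponseError, got %v", err)
	}
}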
+func (v *VirtualMachineImagesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualMachineImagesClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualMachineImagesClient.List": + resp, err = v.dispatchList(req) + case "VirtualMachineImagesClient.ListByEdgeZone": + resp, err = v.dispatchListByEdgeZone(req) + case "VirtualMachineImagesClient.ListOffers": + resp, err = v.dispatchListOffers(req) + case "VirtualMachineImagesClient.ListPublishers": + resp, err = v.dispatchListPublishers(req) + case "VirtualMachineImagesClient.ListSKUs": + resp, err = v.dispatchListSKUs(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualMachineImagesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/publishers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/artifacttypes/vmimage/offers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/skus/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/versions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 6 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + publisherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publisherName")]) + if err != nil { + return nil, err + } + offerParam, err := url.PathUnescape(matches[regex.SubexpIndex("offer")]) + if err != nil { + return nil, err + } + skusParam, err := url.PathUnescape(matches[regex.SubexpIndex("skus")]) + if err != nil { + return nil, err + } + versionParam, err := url.PathUnescape(matches[regex.SubexpIndex("version")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.Get(req.Context(), locationParam, publisherNameParam, offerParam, skusParam, versionParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineImage, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineImagesServerTransport) dispatchList(req *http.Request) (*http.Response, error) { + if v.srv.List == nil { + return nil, &nonRetriableError{errors.New("fake for method List not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/publishers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/artifacttypes/vmimage/offers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/skus/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/versions` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + publisherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publisherName")]) + if err != nil { + return nil, err + } + offerParam, err := url.PathUnescape(matches[regex.SubexpIndex("offer")]) + if err != nil { + return nil, err + } + skusParam, err := url.PathUnescape(matches[regex.SubexpIndex("skus")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + topUnescaped, err := url.QueryUnescape(qp.Get("$top")) + if err != nil { + return nil, err + } + topParam, err := parseOptional(topUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + orderbyUnescaped, err := url.QueryUnescape(qp.Get("$orderby")) + if err != nil { + return nil, err + } + orderbyParam := getOptional(orderbyUnescaped) + var options *armcompute.VirtualMachineImagesClientListOptions + if expandParam != nil || topParam != nil || orderbyParam != nil { + options = &armcompute.VirtualMachineImagesClientListOptions{ + Expand: expandParam, + Top: topParam, + Orderby: orderbyParam, + } + } + respr, errRespr := v.srv.List(req.Context(), locationParam, publisherNameParam, offerParam, skusParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineImageResourceArray, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineImagesServerTransport) dispatchListByEdgeZone(req *http.Request) (*http.Response, error) { + if v.srv.ListByEdgeZone == nil { + return nil, &nonRetriableError{errors.New("fake for method ListByEdgeZone not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/edgeZones/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/vmimages` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + edgeZoneParam, err := url.PathUnescape(matches[regex.SubexpIndex("edgeZone")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.ListByEdgeZone(req.Context(), locationParam, edgeZoneParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VMImagesInEdgeZoneListResult, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineImagesServerTransport) dispatchListOffers(req *http.Request) (*http.Response, error) { + if v.srv.ListOffers == nil { + return nil, &nonRetriableError{errors.New("fake for method ListOffers not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/publishers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/artifacttypes/vmimage/offers` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + publisherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publisherName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.ListOffers(req.Context(), locationParam, publisherNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineImageResourceArray, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineImagesServerTransport) dispatchListPublishers(req *http.Request) (*http.Response, error) { + if v.srv.ListPublishers == nil { + return nil, &nonRetriableError{errors.New("fake for method ListPublishers not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/publishers` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.ListPublishers(req.Context(), locationParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineImageResourceArray, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineImagesServerTransport) dispatchListSKUs(req *http.Request) (*http.Response, error) { + if v.srv.ListSKUs == nil { + return nil, &nonRetriableError{errors.New("fake for method ListSKUs not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/publishers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/artifacttypes/vmimage/offers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/skus` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + publisherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publisherName")]) + if err != nil { + return nil, err + } + offerParam, err := url.PathUnescape(matches[regex.SubexpIndex("offer")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.ListSKUs(req.Context(), locationParam, publisherNameParam, offerParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineImageResourceArray, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachineimagesedgezone_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachineimagesedgezone_server.go new file mode 100644 index 00000000000..68814a942c5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachineimagesedgezone_server.go @@ -0,0 +1,330 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" + "strconv" +) + +// VirtualMachineImagesEdgeZoneServer is a fake server for instances of the armcompute.VirtualMachineImagesEdgeZoneClient type. +type VirtualMachineImagesEdgeZoneServer struct { + // Get is the fake for method VirtualMachineImagesEdgeZoneClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, location string, edgeZone string, publisherName string, offer string, skus string, version string, options *armcompute.VirtualMachineImagesEdgeZoneClientGetOptions) (resp azfake.Responder[armcompute.VirtualMachineImagesEdgeZoneClientGetResponse], errResp azfake.ErrorResponder) + + // List is the fake for method VirtualMachineImagesEdgeZoneClient.List + // HTTP status codes to indicate success: http.StatusOK + List func(ctx context.Context, location string, edgeZone string, publisherName string, offer string, skus string, options *armcompute.VirtualMachineImagesEdgeZoneClientListOptions) (resp azfake.Responder[armcompute.VirtualMachineImagesEdgeZoneClientListResponse], errResp azfake.ErrorResponder) + + // ListOffers is the fake for method VirtualMachineImagesEdgeZoneClient.ListOffers + // HTTP status codes to indicate success: http.StatusOK + ListOffers func(ctx context.Context, location string, edgeZone string, publisherName string, options *armcompute.VirtualMachineImagesEdgeZoneClientListOffersOptions) (resp azfake.Responder[armcompute.VirtualMachineImagesEdgeZoneClientListOffersResponse], errResp azfake.ErrorResponder) + + // ListPublishers is the fake for method VirtualMachineImagesEdgeZoneClient.ListPublishers + // HTTP status codes to indicate success: http.StatusOK + ListPublishers func(ctx context.Context, location string, edgeZone string, options *armcompute.VirtualMachineImagesEdgeZoneClientListPublishersOptions) (resp azfake.Responder[armcompute.VirtualMachineImagesEdgeZoneClientListPublishersResponse], errResp azfake.ErrorResponder) + + // ListSKUs is the fake for method VirtualMachineImagesEdgeZoneClient.ListSKUs + // HTTP status codes to indicate success: http.StatusOK + ListSKUs func(ctx 
context.Context, location string, edgeZone string, publisherName string, offer string, options *armcompute.VirtualMachineImagesEdgeZoneClientListSKUsOptions) (resp azfake.Responder[armcompute.VirtualMachineImagesEdgeZoneClientListSKUsResponse], errResp azfake.ErrorResponder) +} + +// NewVirtualMachineImagesEdgeZoneServerTransport creates a new instance of VirtualMachineImagesEdgeZoneServerTransport with the provided implementation. +// The returned VirtualMachineImagesEdgeZoneServerTransport instance is connected to an instance of armcompute.VirtualMachineImagesEdgeZoneClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVirtualMachineImagesEdgeZoneServerTransport(srv *VirtualMachineImagesEdgeZoneServer) *VirtualMachineImagesEdgeZoneServerTransport { + return &VirtualMachineImagesEdgeZoneServerTransport{srv: srv} +} + +// VirtualMachineImagesEdgeZoneServerTransport connects instances of armcompute.VirtualMachineImagesEdgeZoneClient to instances of VirtualMachineImagesEdgeZoneServer. +// Don't use this type directly, use NewVirtualMachineImagesEdgeZoneServerTransport instead. +type VirtualMachineImagesEdgeZoneServerTransport struct { + srv *VirtualMachineImagesEdgeZoneServer +} + +// Do implements the policy.Transporter interface for VirtualMachineImagesEdgeZoneServerTransport. +func (v *VirtualMachineImagesEdgeZoneServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualMachineImagesEdgeZoneClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualMachineImagesEdgeZoneClient.List": + resp, err = v.dispatchList(req) + case "VirtualMachineImagesEdgeZoneClient.ListOffers": + resp, err = v.dispatchListOffers(req) + case "VirtualMachineImagesEdgeZoneClient.ListPublishers": + resp, err = v.dispatchListPublishers(req) + case "VirtualMachineImagesEdgeZoneClient.ListSKUs": + resp, err = v.dispatchListSKUs(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualMachineImagesEdgeZoneServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/edgeZones/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/publishers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/artifacttypes/vmimage/offers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/skus/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/versions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 7 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + edgeZoneParam, err := url.PathUnescape(matches[regex.SubexpIndex("edgeZone")]) + if err != nil { + return nil, err + } + publisherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publisherName")]) + if err != nil { + return nil, err + } + offerParam, err := 
url.PathUnescape(matches[regex.SubexpIndex("offer")]) + if err != nil { + return nil, err + } + skusParam, err := url.PathUnescape(matches[regex.SubexpIndex("skus")]) + if err != nil { + return nil, err + } + versionParam, err := url.PathUnescape(matches[regex.SubexpIndex("version")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.Get(req.Context(), locationParam, edgeZoneParam, publisherNameParam, offerParam, skusParam, versionParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineImage, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineImagesEdgeZoneServerTransport) dispatchList(req *http.Request) (*http.Response, error) { + if v.srv.List == nil { + return nil, &nonRetriableError{errors.New("fake for method List not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/edgeZones/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/publishers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/artifacttypes/vmimage/offers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/skus/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/versions` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 6 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + edgeZoneParam, err := url.PathUnescape(matches[regex.SubexpIndex("edgeZone")]) + if err != nil { + return nil, err + } + publisherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publisherName")]) + if err != nil { + return nil, err + } + offerParam, err := url.PathUnescape(matches[regex.SubexpIndex("offer")]) + if err != nil { + return nil, err + } + skusParam, err := url.PathUnescape(matches[regex.SubexpIndex("skus")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + topUnescaped, err := url.QueryUnescape(qp.Get("$top")) + if err != nil { + return nil, err + } + topParam, err := parseOptional(topUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + orderbyUnescaped, err := url.QueryUnescape(qp.Get("$orderby")) + if err != nil { + return nil, err + } + orderbyParam := getOptional(orderbyUnescaped) + var options *armcompute.VirtualMachineImagesEdgeZoneClientListOptions + if expandParam != nil || topParam != nil || orderbyParam != nil { + options = &armcompute.VirtualMachineImagesEdgeZoneClientListOptions{ + Expand: expandParam, + Top: topParam, + Orderby: orderbyParam, + } + } + respr, errRespr := v.srv.List(req.Context(), locationParam, edgeZoneParam, publisherNameParam, offerParam, skusParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := 
server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineImageResourceArray, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineImagesEdgeZoneServerTransport) dispatchListOffers(req *http.Request) (*http.Response, error) { + if v.srv.ListOffers == nil { + return nil, &nonRetriableError{errors.New("fake for method ListOffers not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/edgeZones/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/publishers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/artifacttypes/vmimage/offers` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + edgeZoneParam, err := url.PathUnescape(matches[regex.SubexpIndex("edgeZone")]) + if err != nil { + return nil, err + } + publisherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publisherName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.ListOffers(req.Context(), locationParam, edgeZoneParam, publisherNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineImageResourceArray, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineImagesEdgeZoneServerTransport) dispatchListPublishers(req *http.Request) (*http.Response, error) { + if v.srv.ListPublishers == nil { + return nil, &nonRetriableError{errors.New("fake for method ListPublishers not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/edgeZones/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/publishers` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + edgeZoneParam, err := url.PathUnescape(matches[regex.SubexpIndex("edgeZone")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.ListPublishers(req.Context(), locationParam, edgeZoneParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineImageResourceArray, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineImagesEdgeZoneServerTransport) dispatchListSKUs(req *http.Request) (*http.Response, error) { + if v.srv.ListSKUs == nil { + return nil, &nonRetriableError{errors.New("fake for method ListSKUs not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/edgeZones/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/publishers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/artifacttypes/vmimage/offers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/skus` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + edgeZoneParam, err := url.PathUnescape(matches[regex.SubexpIndex("edgeZone")]) + if err != nil { + return nil, err + } + publisherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publisherName")]) + if err != nil { + return nil, err + } + offerParam, err := url.PathUnescape(matches[regex.SubexpIndex("offer")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.ListSKUs(req.Context(), locationParam, edgeZoneParam, publisherNameParam, offerParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineImageResourceArray, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachineruncommands_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachineruncommands_server.go new file mode 100644 index 00000000000..c6da237e5cd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachineruncommands_server.go @@ -0,0 +1,440 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// VirtualMachineRunCommandsServer is a fake server for instances of the armcompute.VirtualMachineRunCommandsClient type. 
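// Editorially added usage sketch (not part of the generated file; the helper
// name and test values below are illustrative): each fake server in this
// package is driven by assigning per-method implementations on its struct and
// routing a real client through the matching *ServerTransport via
// azcore.ClientOptions.Transporter, as the constructor comments describe.
// Faking VirtualMachineRunCommandsClient.Get might look roughly like this:
//
//	import (
//		"context"
//		"net/http"
//
//		"github.com/Azure/azure-sdk-for-go/sdk/azcore"
//		"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
//		azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
//		"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
//		"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
//		"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake"
//	)
//
//	// exampleFakeRunCommandsGet is a hypothetical test helper, not SDK API.
//	func exampleFakeRunCommandsGet(ctx context.Context) error {
//		// Implement only the method under test; calls to unimplemented methods
//		// are rejected by the transport with a "not implemented" error.
//		srv := fake.VirtualMachineRunCommandsServer{
//			Get: func(ctx context.Context, location, commandID string, options *armcompute.VirtualMachineRunCommandsClientGetOptions) (resp azfake.Responder[armcompute.VirtualMachineRunCommandsClientGetResponse], errResp azfake.ErrorResponder) {
//				// Queue the payload and status code for the dispatcher to marshal back.
//				resp.SetResponse(http.StatusOK, armcompute.VirtualMachineRunCommandsClientGetResponse{
//					RunCommandDocument: armcompute.RunCommandDocument{ID: to.Ptr(commandID)},
//				}, nil)
//				return
//			},
//		}
//		// azfake.TokenCredential is a no-op credential; the Transporter option
//		// sends every request to the fake instead of over the network.
//		client, err := armcompute.NewVirtualMachineRunCommandsClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
//			ClientOptions: azcore.ClientOptions{Transporter: fake.NewVirtualMachineRunCommandsServerTransport(&srv)},
//		})
//		if err != nil {
//			return err
//		}
//		_, err = client.Get(ctx, "eastus", "RunPowerShellScript", nil)
//		return err
//	}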
+type VirtualMachineRunCommandsServer struct { + // BeginCreateOrUpdate is the fake for method VirtualMachineRunCommandsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, vmName string, runCommandName string, runCommand armcompute.VirtualMachineRunCommand, options *armcompute.VirtualMachineRunCommandsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineRunCommandsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VirtualMachineRunCommandsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, vmName string, runCommandName string, options *armcompute.VirtualMachineRunCommandsClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineRunCommandsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VirtualMachineRunCommandsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, location string, commandID string, options *armcompute.VirtualMachineRunCommandsClientGetOptions) (resp azfake.Responder[armcompute.VirtualMachineRunCommandsClientGetResponse], errResp azfake.ErrorResponder) + + // GetByVirtualMachine is the fake for method VirtualMachineRunCommandsClient.GetByVirtualMachine + // HTTP status codes to indicate success: http.StatusOK + GetByVirtualMachine func(ctx context.Context, resourceGroupName string, vmName string, runCommandName string, options *armcompute.VirtualMachineRunCommandsClientGetByVirtualMachineOptions) (resp azfake.Responder[armcompute.VirtualMachineRunCommandsClientGetByVirtualMachineResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method VirtualMachineRunCommandsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(location string, options *armcompute.VirtualMachineRunCommandsClientListOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineRunCommandsClientListResponse]) + + // NewListByVirtualMachinePager is the fake for method VirtualMachineRunCommandsClient.NewListByVirtualMachinePager + // HTTP status codes to indicate success: http.StatusOK + NewListByVirtualMachinePager func(resourceGroupName string, vmName string, options *armcompute.VirtualMachineRunCommandsClientListByVirtualMachineOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineRunCommandsClientListByVirtualMachineResponse]) + + // BeginUpdate is the fake for method VirtualMachineRunCommandsClient.BeginUpdate + // HTTP status codes to indicate success: http.StatusOK + BeginUpdate func(ctx context.Context, resourceGroupName string, vmName string, runCommandName string, runCommand armcompute.VirtualMachineRunCommandUpdate, options *armcompute.VirtualMachineRunCommandsClientBeginUpdateOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineRunCommandsClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewVirtualMachineRunCommandsServerTransport creates a new instance of VirtualMachineRunCommandsServerTransport with the provided implementation. 
+// The returned VirtualMachineRunCommandsServerTransport instance is connected to an instance of armcompute.VirtualMachineRunCommandsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVirtualMachineRunCommandsServerTransport(srv *VirtualMachineRunCommandsServer) *VirtualMachineRunCommandsServerTransport { + return &VirtualMachineRunCommandsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armcompute.VirtualMachineRunCommandsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armcompute.VirtualMachineRunCommandsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armcompute.VirtualMachineRunCommandsClientListResponse]](), + newListByVirtualMachinePager: newTracker[azfake.PagerResponder[armcompute.VirtualMachineRunCommandsClientListByVirtualMachineResponse]](), + beginUpdate: newTracker[azfake.PollerResponder[armcompute.VirtualMachineRunCommandsClientUpdateResponse]](), + } +} + +// VirtualMachineRunCommandsServerTransport connects instances of armcompute.VirtualMachineRunCommandsClient to instances of VirtualMachineRunCommandsServer. +// Don't use this type directly, use NewVirtualMachineRunCommandsServerTransport instead. +type VirtualMachineRunCommandsServerTransport struct { + srv *VirtualMachineRunCommandsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armcompute.VirtualMachineRunCommandsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armcompute.VirtualMachineRunCommandsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armcompute.VirtualMachineRunCommandsClientListResponse]] + newListByVirtualMachinePager *tracker[azfake.PagerResponder[armcompute.VirtualMachineRunCommandsClientListByVirtualMachineResponse]] + beginUpdate *tracker[azfake.PollerResponder[armcompute.VirtualMachineRunCommandsClientUpdateResponse]] +} + +// Do implements the policy.Transporter interface for VirtualMachineRunCommandsServerTransport. 
+func (v *VirtualMachineRunCommandsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualMachineRunCommandsClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VirtualMachineRunCommandsClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VirtualMachineRunCommandsClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualMachineRunCommandsClient.GetByVirtualMachine": + resp, err = v.dispatchGetByVirtualMachine(req) + case "VirtualMachineRunCommandsClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + case "VirtualMachineRunCommandsClient.NewListByVirtualMachinePager": + resp, err = v.dispatchNewListByVirtualMachinePager(req) + case "VirtualMachineRunCommandsClient.BeginUpdate": + resp, err = v.dispatchBeginUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualMachineRunCommandsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/runCommands/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineRunCommand](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + runCommandNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("runCommandName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, vmNameParam, runCommandNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineRunCommandsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/runCommands/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + runCommandNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("runCommandName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, vmNameParam, runCommandNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineRunCommandsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/runCommands/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + commandIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("commandId")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.Get(req.Context(), locationParam, commandIDParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).RunCommandDocument, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineRunCommandsServerTransport) dispatchGetByVirtualMachine(req *http.Request) (*http.Response, error) { + if v.srv.GetByVirtualMachine == nil { + return nil, &nonRetriableError{errors.New("fake for method GetByVirtualMachine not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/runCommands/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + runCommandNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("runCommandName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armcompute.VirtualMachineRunCommandsClientGetByVirtualMachineOptions + if expandParam != nil { + options = &armcompute.VirtualMachineRunCommandsClientGetByVirtualMachineOptions{ + Expand: expandParam, + } + } + respr, errRespr := v.srv.GetByVirtualMachine(req.Context(), resourceGroupNameParam, vmNameParam, runCommandNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineRunCommand, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineRunCommandsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := v.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/runCommands` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListPager(locationParam, nil) + newListPager = &resp + v.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armcompute.VirtualMachineRunCommandsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + v.newListPager.remove(req) + } + return resp, nil +} + +func (v *VirtualMachineRunCommandsServerTransport) dispatchNewListByVirtualMachinePager(req *http.Request) (*http.Response, error) { + if v.srv.NewListByVirtualMachinePager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByVirtualMachinePager not implemented")} + } + newListByVirtualMachinePager := v.newListByVirtualMachinePager.get(req) + if newListByVirtualMachinePager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/runCommands` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armcompute.VirtualMachineRunCommandsClientListByVirtualMachineOptions + if expandParam != nil { + options = &armcompute.VirtualMachineRunCommandsClientListByVirtualMachineOptions{ + Expand: expandParam, + } + } + resp := v.srv.NewListByVirtualMachinePager(resourceGroupNameParam, vmNameParam, options) + newListByVirtualMachinePager = &resp + v.newListByVirtualMachinePager.add(req, newListByVirtualMachinePager) + server.PagerResponderInjectNextLinks(newListByVirtualMachinePager, req, 
func(page *armcompute.VirtualMachineRunCommandsClientListByVirtualMachineResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByVirtualMachinePager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListByVirtualMachinePager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByVirtualMachinePager) { + v.newListByVirtualMachinePager.remove(req) + } + return resp, nil +} + +func (v *VirtualMachineRunCommandsServerTransport) dispatchBeginUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdate not implemented")} + } + beginUpdate := v.beginUpdate.get(req) + if beginUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/runCommands/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineRunCommandUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + runCommandNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("runCommandName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginUpdate(req.Context(), resourceGroupNameParam, vmNameParam, runCommandNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdate = &respr + v.beginUpdate.add(req, beginUpdate) + } + + resp, err := server.PollerResponderNext(beginUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.beginUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdate) { + v.beginUpdate.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachines_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachines_server.go new file mode 100644 index 00000000000..e1cd18f7fd8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachines_server.go @@ -0,0 +1,1494 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
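// Editorially added sketch (not part of the generated file; the helper name and
// test values are illustrative, and the azfake responder helpers shown are an
// assumption about the azcore/fake API): the Begin* fakes in this file return
// azfake.PollerResponder values, which let a test queue intermediate polling
// responses before the terminal result. Faking
// VirtualMachinesClient.BeginCreateOrUpdate could look roughly like this:
//
//	import (
//		"context"
//		"net/http"
//
//		"github.com/Azure/azure-sdk-for-go/sdk/azcore"
//		"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
//		azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
//		"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
//		"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
//		"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake"
//	)
//
//	// exampleFakeVMCreate is a hypothetical test helper, not SDK API.
//	func exampleFakeVMCreate(ctx context.Context) error {
//		srv := fake.VirtualMachinesServer{
//			BeginCreateOrUpdate: func(ctx context.Context, resourceGroupName, vmName string, parameters armcompute.VirtualMachine, options *armcompute.VirtualMachinesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armcompute.VirtualMachinesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) {
//				// One in-progress poll, then a terminal 200 that echoes the request body.
//				resp.AddNonTerminalResponse(http.StatusCreated, nil)
//				resp.SetTerminalResponse(http.StatusOK, armcompute.VirtualMachinesClientCreateOrUpdateResponse{VirtualMachine: parameters}, nil)
//				return
//			},
//		}
//		client, err := armcompute.NewVirtualMachinesClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
//			ClientOptions: azcore.ClientOptions{Transporter: fake.NewVirtualMachinesServerTransport(&srv)},
//		})
//		if err != nil {
//			return err
//		}
//		poller, err := client.BeginCreateOrUpdate(ctx, "example-rg", "example-vm", armcompute.VirtualMachine{Location: to.Ptr("eastus")}, nil)
//		if err != nil {
//			return err
//		}
//		_, err = poller.PollUntilDone(ctx, nil)
//		return err
//	}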
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "reflect" + "regexp" + "strconv" +) + +// VirtualMachinesServer is a fake server for instances of the armcompute.VirtualMachinesClient type. +type VirtualMachinesServer struct { + // BeginAssessPatches is the fake for method VirtualMachinesClient.BeginAssessPatches + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginAssessPatches func(ctx context.Context, resourceGroupName string, vmName string, options *armcompute.VirtualMachinesClientBeginAssessPatchesOptions) (resp azfake.PollerResponder[armcompute.VirtualMachinesClientAssessPatchesResponse], errResp azfake.ErrorResponder) + + // BeginAttachDetachDataDisks is the fake for method VirtualMachinesClient.BeginAttachDetachDataDisks + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginAttachDetachDataDisks func(ctx context.Context, resourceGroupName string, vmName string, parameters armcompute.AttachDetachDataDisksRequest, options *armcompute.VirtualMachinesClientBeginAttachDetachDataDisksOptions) (resp azfake.PollerResponder[armcompute.VirtualMachinesClientAttachDetachDataDisksResponse], errResp azfake.ErrorResponder) + + // BeginCapture is the fake for method VirtualMachinesClient.BeginCapture + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginCapture func(ctx context.Context, resourceGroupName string, vmName string, parameters armcompute.VirtualMachineCaptureParameters, options *armcompute.VirtualMachinesClientBeginCaptureOptions) (resp azfake.PollerResponder[armcompute.VirtualMachinesClientCaptureResponse], errResp azfake.ErrorResponder) + + // BeginConvertToManagedDisks is the fake for method VirtualMachinesClient.BeginConvertToManagedDisks + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginConvertToManagedDisks func(ctx context.Context, resourceGroupName string, vmName string, options *armcompute.VirtualMachinesClientBeginConvertToManagedDisksOptions) (resp azfake.PollerResponder[armcompute.VirtualMachinesClientConvertToManagedDisksResponse], errResp azfake.ErrorResponder) + + // BeginCreateOrUpdate is the fake for method VirtualMachinesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, vmName string, parameters armcompute.VirtualMachine, options *armcompute.VirtualMachinesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armcompute.VirtualMachinesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDeallocate is the fake for method VirtualMachinesClient.BeginDeallocate + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginDeallocate func(ctx context.Context, resourceGroupName string, vmName string, options *armcompute.VirtualMachinesClientBeginDeallocateOptions) (resp azfake.PollerResponder[armcompute.VirtualMachinesClientDeallocateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VirtualMachinesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, 
http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, vmName string, options *armcompute.VirtualMachinesClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.VirtualMachinesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Generalize is the fake for method VirtualMachinesClient.Generalize + // HTTP status codes to indicate success: http.StatusOK + Generalize func(ctx context.Context, resourceGroupName string, vmName string, options *armcompute.VirtualMachinesClientGeneralizeOptions) (resp azfake.Responder[armcompute.VirtualMachinesClientGeneralizeResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VirtualMachinesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, vmName string, options *armcompute.VirtualMachinesClientGetOptions) (resp azfake.Responder[armcompute.VirtualMachinesClientGetResponse], errResp azfake.ErrorResponder) + + // BeginInstallPatches is the fake for method VirtualMachinesClient.BeginInstallPatches + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginInstallPatches func(ctx context.Context, resourceGroupName string, vmName string, installPatchesInput armcompute.VirtualMachineInstallPatchesParameters, options *armcompute.VirtualMachinesClientBeginInstallPatchesOptions) (resp azfake.PollerResponder[armcompute.VirtualMachinesClientInstallPatchesResponse], errResp azfake.ErrorResponder) + + // InstanceView is the fake for method VirtualMachinesClient.InstanceView + // HTTP status codes to indicate success: http.StatusOK + InstanceView func(ctx context.Context, resourceGroupName string, vmName string, options *armcompute.VirtualMachinesClientInstanceViewOptions) (resp azfake.Responder[armcompute.VirtualMachinesClientInstanceViewResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method VirtualMachinesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armcompute.VirtualMachinesClientListOptions) (resp azfake.PagerResponder[armcompute.VirtualMachinesClientListResponse]) + + // NewListAllPager is the fake for method VirtualMachinesClient.NewListAllPager + // HTTP status codes to indicate success: http.StatusOK + NewListAllPager func(options *armcompute.VirtualMachinesClientListAllOptions) (resp azfake.PagerResponder[armcompute.VirtualMachinesClientListAllResponse]) + + // NewListAvailableSizesPager is the fake for method VirtualMachinesClient.NewListAvailableSizesPager + // HTTP status codes to indicate success: http.StatusOK + NewListAvailableSizesPager func(resourceGroupName string, vmName string, options *armcompute.VirtualMachinesClientListAvailableSizesOptions) (resp azfake.PagerResponder[armcompute.VirtualMachinesClientListAvailableSizesResponse]) + + // NewListByLocationPager is the fake for method VirtualMachinesClient.NewListByLocationPager + // HTTP status codes to indicate success: http.StatusOK + NewListByLocationPager func(location string, options *armcompute.VirtualMachinesClientListByLocationOptions) (resp azfake.PagerResponder[armcompute.VirtualMachinesClientListByLocationResponse]) + + // BeginPerformMaintenance is the fake for method VirtualMachinesClient.BeginPerformMaintenance + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginPerformMaintenance func(ctx context.Context, resourceGroupName string, vmName string, 
options *armcompute.VirtualMachinesClientBeginPerformMaintenanceOptions) (resp azfake.PollerResponder[armcompute.VirtualMachinesClientPerformMaintenanceResponse], errResp azfake.ErrorResponder) + + // BeginPowerOff is the fake for method VirtualMachinesClient.BeginPowerOff + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginPowerOff func(ctx context.Context, resourceGroupName string, vmName string, options *armcompute.VirtualMachinesClientBeginPowerOffOptions) (resp azfake.PollerResponder[armcompute.VirtualMachinesClientPowerOffResponse], errResp azfake.ErrorResponder) + + // BeginReapply is the fake for method VirtualMachinesClient.BeginReapply + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginReapply func(ctx context.Context, resourceGroupName string, vmName string, options *armcompute.VirtualMachinesClientBeginReapplyOptions) (resp azfake.PollerResponder[armcompute.VirtualMachinesClientReapplyResponse], errResp azfake.ErrorResponder) + + // BeginRedeploy is the fake for method VirtualMachinesClient.BeginRedeploy + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginRedeploy func(ctx context.Context, resourceGroupName string, vmName string, options *armcompute.VirtualMachinesClientBeginRedeployOptions) (resp azfake.PollerResponder[armcompute.VirtualMachinesClientRedeployResponse], errResp azfake.ErrorResponder) + + // BeginReimage is the fake for method VirtualMachinesClient.BeginReimage + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginReimage func(ctx context.Context, resourceGroupName string, vmName string, options *armcompute.VirtualMachinesClientBeginReimageOptions) (resp azfake.PollerResponder[armcompute.VirtualMachinesClientReimageResponse], errResp azfake.ErrorResponder) + + // BeginRestart is the fake for method VirtualMachinesClient.BeginRestart + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginRestart func(ctx context.Context, resourceGroupName string, vmName string, options *armcompute.VirtualMachinesClientBeginRestartOptions) (resp azfake.PollerResponder[armcompute.VirtualMachinesClientRestartResponse], errResp azfake.ErrorResponder) + + // RetrieveBootDiagnosticsData is the fake for method VirtualMachinesClient.RetrieveBootDiagnosticsData + // HTTP status codes to indicate success: http.StatusOK + RetrieveBootDiagnosticsData func(ctx context.Context, resourceGroupName string, vmName string, options *armcompute.VirtualMachinesClientRetrieveBootDiagnosticsDataOptions) (resp azfake.Responder[armcompute.VirtualMachinesClientRetrieveBootDiagnosticsDataResponse], errResp azfake.ErrorResponder) + + // BeginRunCommand is the fake for method VirtualMachinesClient.BeginRunCommand + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginRunCommand func(ctx context.Context, resourceGroupName string, vmName string, parameters armcompute.RunCommandInput, options *armcompute.VirtualMachinesClientBeginRunCommandOptions) (resp azfake.PollerResponder[armcompute.VirtualMachinesClientRunCommandResponse], errResp azfake.ErrorResponder) + + // SimulateEviction is the fake for method VirtualMachinesClient.SimulateEviction + // HTTP status codes to indicate success: http.StatusNoContent + SimulateEviction func(ctx context.Context, resourceGroupName string, vmName string, options *armcompute.VirtualMachinesClientSimulateEvictionOptions) (resp 
azfake.Responder[armcompute.VirtualMachinesClientSimulateEvictionResponse], errResp azfake.ErrorResponder) + + // BeginStart is the fake for method VirtualMachinesClient.BeginStart + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginStart func(ctx context.Context, resourceGroupName string, vmName string, options *armcompute.VirtualMachinesClientBeginStartOptions) (resp azfake.PollerResponder[armcompute.VirtualMachinesClientStartResponse], errResp azfake.ErrorResponder) + + // BeginUpdate is the fake for method VirtualMachinesClient.BeginUpdate + // HTTP status codes to indicate success: http.StatusOK + BeginUpdate func(ctx context.Context, resourceGroupName string, vmName string, parameters armcompute.VirtualMachineUpdate, options *armcompute.VirtualMachinesClientBeginUpdateOptions) (resp azfake.PollerResponder[armcompute.VirtualMachinesClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewVirtualMachinesServerTransport creates a new instance of VirtualMachinesServerTransport with the provided implementation. +// The returned VirtualMachinesServerTransport instance is connected to an instance of armcompute.VirtualMachinesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVirtualMachinesServerTransport(srv *VirtualMachinesServer) *VirtualMachinesServerTransport { + return &VirtualMachinesServerTransport{ + srv: srv, + beginAssessPatches: newTracker[azfake.PollerResponder[armcompute.VirtualMachinesClientAssessPatchesResponse]](), + beginAttachDetachDataDisks: newTracker[azfake.PollerResponder[armcompute.VirtualMachinesClientAttachDetachDataDisksResponse]](), + beginCapture: newTracker[azfake.PollerResponder[armcompute.VirtualMachinesClientCaptureResponse]](), + beginConvertToManagedDisks: newTracker[azfake.PollerResponder[armcompute.VirtualMachinesClientConvertToManagedDisksResponse]](), + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armcompute.VirtualMachinesClientCreateOrUpdateResponse]](), + beginDeallocate: newTracker[azfake.PollerResponder[armcompute.VirtualMachinesClientDeallocateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armcompute.VirtualMachinesClientDeleteResponse]](), + beginInstallPatches: newTracker[azfake.PollerResponder[armcompute.VirtualMachinesClientInstallPatchesResponse]](), + newListPager: newTracker[azfake.PagerResponder[armcompute.VirtualMachinesClientListResponse]](), + newListAllPager: newTracker[azfake.PagerResponder[armcompute.VirtualMachinesClientListAllResponse]](), + newListAvailableSizesPager: newTracker[azfake.PagerResponder[armcompute.VirtualMachinesClientListAvailableSizesResponse]](), + newListByLocationPager: newTracker[azfake.PagerResponder[armcompute.VirtualMachinesClientListByLocationResponse]](), + beginPerformMaintenance: newTracker[azfake.PollerResponder[armcompute.VirtualMachinesClientPerformMaintenanceResponse]](), + beginPowerOff: newTracker[azfake.PollerResponder[armcompute.VirtualMachinesClientPowerOffResponse]](), + beginReapply: newTracker[azfake.PollerResponder[armcompute.VirtualMachinesClientReapplyResponse]](), + beginRedeploy: newTracker[azfake.PollerResponder[armcompute.VirtualMachinesClientRedeployResponse]](), + beginReimage: newTracker[azfake.PollerResponder[armcompute.VirtualMachinesClientReimageResponse]](), + beginRestart: newTracker[azfake.PollerResponder[armcompute.VirtualMachinesClientRestartResponse]](), + beginRunCommand: 
newTracker[azfake.PollerResponder[armcompute.VirtualMachinesClientRunCommandResponse]](), + beginStart: newTracker[azfake.PollerResponder[armcompute.VirtualMachinesClientStartResponse]](), + beginUpdate: newTracker[azfake.PollerResponder[armcompute.VirtualMachinesClientUpdateResponse]](), + } +} + +// VirtualMachinesServerTransport connects instances of armcompute.VirtualMachinesClient to instances of VirtualMachinesServer. +// Don't use this type directly, use NewVirtualMachinesServerTransport instead. +type VirtualMachinesServerTransport struct { + srv *VirtualMachinesServer + beginAssessPatches *tracker[azfake.PollerResponder[armcompute.VirtualMachinesClientAssessPatchesResponse]] + beginAttachDetachDataDisks *tracker[azfake.PollerResponder[armcompute.VirtualMachinesClientAttachDetachDataDisksResponse]] + beginCapture *tracker[azfake.PollerResponder[armcompute.VirtualMachinesClientCaptureResponse]] + beginConvertToManagedDisks *tracker[azfake.PollerResponder[armcompute.VirtualMachinesClientConvertToManagedDisksResponse]] + beginCreateOrUpdate *tracker[azfake.PollerResponder[armcompute.VirtualMachinesClientCreateOrUpdateResponse]] + beginDeallocate *tracker[azfake.PollerResponder[armcompute.VirtualMachinesClientDeallocateResponse]] + beginDelete *tracker[azfake.PollerResponder[armcompute.VirtualMachinesClientDeleteResponse]] + beginInstallPatches *tracker[azfake.PollerResponder[armcompute.VirtualMachinesClientInstallPatchesResponse]] + newListPager *tracker[azfake.PagerResponder[armcompute.VirtualMachinesClientListResponse]] + newListAllPager *tracker[azfake.PagerResponder[armcompute.VirtualMachinesClientListAllResponse]] + newListAvailableSizesPager *tracker[azfake.PagerResponder[armcompute.VirtualMachinesClientListAvailableSizesResponse]] + newListByLocationPager *tracker[azfake.PagerResponder[armcompute.VirtualMachinesClientListByLocationResponse]] + beginPerformMaintenance *tracker[azfake.PollerResponder[armcompute.VirtualMachinesClientPerformMaintenanceResponse]] + beginPowerOff *tracker[azfake.PollerResponder[armcompute.VirtualMachinesClientPowerOffResponse]] + beginReapply *tracker[azfake.PollerResponder[armcompute.VirtualMachinesClientReapplyResponse]] + beginRedeploy *tracker[azfake.PollerResponder[armcompute.VirtualMachinesClientRedeployResponse]] + beginReimage *tracker[azfake.PollerResponder[armcompute.VirtualMachinesClientReimageResponse]] + beginRestart *tracker[azfake.PollerResponder[armcompute.VirtualMachinesClientRestartResponse]] + beginRunCommand *tracker[azfake.PollerResponder[armcompute.VirtualMachinesClientRunCommandResponse]] + beginStart *tracker[azfake.PollerResponder[armcompute.VirtualMachinesClientStartResponse]] + beginUpdate *tracker[azfake.PollerResponder[armcompute.VirtualMachinesClientUpdateResponse]] +} + +// Do implements the policy.Transporter interface for VirtualMachinesServerTransport. 
+func (v *VirtualMachinesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualMachinesClient.BeginAssessPatches": + resp, err = v.dispatchBeginAssessPatches(req) + case "VirtualMachinesClient.BeginAttachDetachDataDisks": + resp, err = v.dispatchBeginAttachDetachDataDisks(req) + case "VirtualMachinesClient.BeginCapture": + resp, err = v.dispatchBeginCapture(req) + case "VirtualMachinesClient.BeginConvertToManagedDisks": + resp, err = v.dispatchBeginConvertToManagedDisks(req) + case "VirtualMachinesClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VirtualMachinesClient.BeginDeallocate": + resp, err = v.dispatchBeginDeallocate(req) + case "VirtualMachinesClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VirtualMachinesClient.Generalize": + resp, err = v.dispatchGeneralize(req) + case "VirtualMachinesClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualMachinesClient.BeginInstallPatches": + resp, err = v.dispatchBeginInstallPatches(req) + case "VirtualMachinesClient.InstanceView": + resp, err = v.dispatchInstanceView(req) + case "VirtualMachinesClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + case "VirtualMachinesClient.NewListAllPager": + resp, err = v.dispatchNewListAllPager(req) + case "VirtualMachinesClient.NewListAvailableSizesPager": + resp, err = v.dispatchNewListAvailableSizesPager(req) + case "VirtualMachinesClient.NewListByLocationPager": + resp, err = v.dispatchNewListByLocationPager(req) + case "VirtualMachinesClient.BeginPerformMaintenance": + resp, err = v.dispatchBeginPerformMaintenance(req) + case "VirtualMachinesClient.BeginPowerOff": + resp, err = v.dispatchBeginPowerOff(req) + case "VirtualMachinesClient.BeginReapply": + resp, err = v.dispatchBeginReapply(req) + case "VirtualMachinesClient.BeginRedeploy": + resp, err = v.dispatchBeginRedeploy(req) + case "VirtualMachinesClient.BeginReimage": + resp, err = v.dispatchBeginReimage(req) + case "VirtualMachinesClient.BeginRestart": + resp, err = v.dispatchBeginRestart(req) + case "VirtualMachinesClient.RetrieveBootDiagnosticsData": + resp, err = v.dispatchRetrieveBootDiagnosticsData(req) + case "VirtualMachinesClient.BeginRunCommand": + resp, err = v.dispatchBeginRunCommand(req) + case "VirtualMachinesClient.SimulateEviction": + resp, err = v.dispatchSimulateEviction(req) + case "VirtualMachinesClient.BeginStart": + resp, err = v.dispatchBeginStart(req) + case "VirtualMachinesClient.BeginUpdate": + resp, err = v.dispatchBeginUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchBeginAssessPatches(req *http.Request) (*http.Response, error) { + if v.srv.BeginAssessPatches == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginAssessPatches not implemented")} + } + beginAssessPatches := v.beginAssessPatches.get(req) + if beginAssessPatches == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/assessPatches` + regex := regexp.MustCompile(regexStr) 
+ matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginAssessPatches(req.Context(), resourceGroupNameParam, vmNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginAssessPatches = &respr + v.beginAssessPatches.add(req, beginAssessPatches) + } + + resp, err := server.PollerResponderNext(beginAssessPatches, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginAssessPatches.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginAssessPatches) { + v.beginAssessPatches.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchBeginAttachDetachDataDisks(req *http.Request) (*http.Response, error) { + if v.srv.BeginAttachDetachDataDisks == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginAttachDetachDataDisks not implemented")} + } + beginAttachDetachDataDisks := v.beginAttachDetachDataDisks.get(req) + if beginAttachDetachDataDisks == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/attachDetachDataDisks` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.AttachDetachDataDisksRequest](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginAttachDetachDataDisks(req.Context(), resourceGroupNameParam, vmNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginAttachDetachDataDisks = &respr + v.beginAttachDetachDataDisks.add(req, beginAttachDetachDataDisks) + } + + resp, err := server.PollerResponderNext(beginAttachDetachDataDisks, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginAttachDetachDataDisks.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginAttachDetachDataDisks) { + v.beginAttachDetachDataDisks.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchBeginCapture(req *http.Request) (*http.Response, error) { + if v.srv.BeginCapture == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCapture not implemented")} + } + beginCapture := v.beginCapture.get(req) + if beginCapture == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/capture` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineCaptureParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCapture(req.Context(), resourceGroupNameParam, vmNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCapture = &respr + v.beginCapture.add(req, beginCapture) + } + + resp, err := server.PollerResponderNext(beginCapture, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginCapture.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCapture) { + v.beginCapture.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchBeginConvertToManagedDisks(req *http.Request) (*http.Response, error) { + if v.srv.BeginConvertToManagedDisks == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginConvertToManagedDisks not implemented")} + } + beginConvertToManagedDisks := v.beginConvertToManagedDisks.get(req) + if beginConvertToManagedDisks == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/convertToManagedDisks` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginConvertToManagedDisks(req.Context(), resourceGroupNameParam, vmNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginConvertToManagedDisks = &respr + v.beginConvertToManagedDisks.add(req, beginConvertToManagedDisks) + } + + resp, err := server.PollerResponderNext(beginConvertToManagedDisks, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginConvertToManagedDisks.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginConvertToManagedDisks) { + v.beginConvertToManagedDisks.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachine](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + ifMatchParam := getOptional(getHeaderValue(req.Header, "If-Match")) + ifNoneMatchParam := getOptional(getHeaderValue(req.Header, "If-None-Match")) + var options *armcompute.VirtualMachinesClientBeginCreateOrUpdateOptions + if ifMatchParam != nil || ifNoneMatchParam != nil { + options = &armcompute.VirtualMachinesClientBeginCreateOrUpdateOptions{ + IfMatch: ifMatchParam, + IfNoneMatch: ifNoneMatchParam, + } + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, vmNameParam, body, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchBeginDeallocate(req *http.Request) (*http.Response, error) { + if v.srv.BeginDeallocate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDeallocate not implemented")} + } + beginDeallocate := v.beginDeallocate.get(req) + if beginDeallocate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/deallocate` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + hibernateUnescaped, err := url.QueryUnescape(qp.Get("hibernate")) + if err != nil { + return nil, err + } + hibernateParam, err := parseOptional(hibernateUnescaped, strconv.ParseBool) + if err != nil { + return nil, err + } + var options *armcompute.VirtualMachinesClientBeginDeallocateOptions + if hibernateParam != nil { + options = &armcompute.VirtualMachinesClientBeginDeallocateOptions{ + Hibernate: hibernateParam, + } + } + respr, errRespr := v.srv.BeginDeallocate(req.Context(), resourceGroupNameParam, vmNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDeallocate = &respr + v.beginDeallocate.add(req, beginDeallocate) + } + + resp, err := server.PollerResponderNext(beginDeallocate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginDeallocate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDeallocate) { + v.beginDeallocate.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + forceDeletionUnescaped, err := url.QueryUnescape(qp.Get("forceDeletion")) + if err != nil { + return nil, err + } + forceDeletionParam, err := parseOptional(forceDeletionUnescaped, strconv.ParseBool) + if err != nil { + return nil, err + } + var options *armcompute.VirtualMachinesClientBeginDeleteOptions + if forceDeletionParam != nil { + options = &armcompute.VirtualMachinesClientBeginDeleteOptions{ + ForceDeletion: forceDeletionParam, + } + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, vmNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchGeneralize(req *http.Request) (*http.Response, error) { + if v.srv.Generalize == nil { + return nil, &nonRetriableError{errors.New("fake for method Generalize not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/generalize` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.Generalize(req.Context(), resourceGroupNameParam, vmNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.NewResponse(respContent, req, nil) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(armcompute.InstanceViewTypes(expandUnescaped)) + var options *armcompute.VirtualMachinesClientGetOptions + if expandParam != nil { + options = &armcompute.VirtualMachinesClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, vmNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachine, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchBeginInstallPatches(req *http.Request) (*http.Response, error) { + if v.srv.BeginInstallPatches == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginInstallPatches not implemented")} + } + beginInstallPatches := v.beginInstallPatches.get(req) + if beginInstallPatches == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/installPatches` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineInstallPatchesParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginInstallPatches(req.Context(), resourceGroupNameParam, vmNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginInstallPatches = &respr + v.beginInstallPatches.add(req, beginInstallPatches) + } + + resp, err := server.PollerResponderNext(beginInstallPatches, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginInstallPatches.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginInstallPatches) { + v.beginInstallPatches.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchInstanceView(req *http.Request) (*http.Response, error) { + if v.srv.InstanceView == nil { + return nil, &nonRetriableError{errors.New("fake for method InstanceView not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/instanceView` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.InstanceView(req.Context(), resourceGroupNameParam, vmNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineInstanceView, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := v.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + filterUnescaped, err := url.QueryUnescape(qp.Get("$filter")) + if err != nil { + return nil, err + } + filterParam := getOptional(filterUnescaped) + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(armcompute.ExpandTypeForListVMs(expandUnescaped)) + var options *armcompute.VirtualMachinesClientListOptions + if filterParam != nil || expandParam != nil { + options = &armcompute.VirtualMachinesClientListOptions{ + Filter: filterParam, + Expand: expandParam, + } + } + resp := v.srv.NewListPager(resourceGroupNameParam, options) + newListPager = &resp + v.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armcompute.VirtualMachinesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + v.newListPager.remove(req) + } + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchNewListAllPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListAllPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAllPager not implemented")} + } + newListAllPager := v.newListAllPager.get(req) + if newListAllPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + statusOnlyUnescaped, err := url.QueryUnescape(qp.Get("statusOnly")) + if err != nil { + return nil, err + } + statusOnlyParam := getOptional(statusOnlyUnescaped) + filterUnescaped, err := url.QueryUnescape(qp.Get("$filter")) + if err != nil { + return nil, err + } + filterParam := getOptional(filterUnescaped) + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(armcompute.ExpandTypesForListVMs(expandUnescaped)) + var options *armcompute.VirtualMachinesClientListAllOptions + if statusOnlyParam != nil || filterParam != nil || expandParam != nil { + options = &armcompute.VirtualMachinesClientListAllOptions{ + StatusOnly: statusOnlyParam, + Filter: filterParam, + Expand: expandParam, + } + } + resp := v.srv.NewListAllPager(options) + newListAllPager = &resp + v.newListAllPager.add(req, newListAllPager) + server.PagerResponderInjectNextLinks(newListAllPager, req, func(page *armcompute.VirtualMachinesClientListAllResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAllPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListAllPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAllPager) { + v.newListAllPager.remove(req) + } + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchNewListAvailableSizesPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListAvailableSizesPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAvailableSizesPager not implemented")} + } + newListAvailableSizesPager := v.newListAvailableSizesPager.get(req) + if newListAvailableSizesPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/vmSizes` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListAvailableSizesPager(resourceGroupNameParam, vmNameParam, nil) + newListAvailableSizesPager = &resp + v.newListAvailableSizesPager.add(req, newListAvailableSizesPager) + } + resp, err := server.PagerResponderNext(newListAvailableSizesPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListAvailableSizesPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAvailableSizesPager) { + v.newListAvailableSizesPager.remove(req) + } + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchNewListByLocationPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListByLocationPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByLocationPager not implemented")} + } + newListByLocationPager := v.newListByLocationPager.get(req) + if newListByLocationPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListByLocationPager(locationParam, nil) + newListByLocationPager = &resp + v.newListByLocationPager.add(req, newListByLocationPager) + server.PagerResponderInjectNextLinks(newListByLocationPager, req, func(page *armcompute.VirtualMachinesClientListByLocationResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByLocationPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListByLocationPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByLocationPager) { + v.newListByLocationPager.remove(req) + } + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchBeginPerformMaintenance(req *http.Request) (*http.Response, error) { + if v.srv.BeginPerformMaintenance == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginPerformMaintenance not implemented")} + } + beginPerformMaintenance := v.beginPerformMaintenance.get(req) + if beginPerformMaintenance == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/performMaintenance` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginPerformMaintenance(req.Context(), resourceGroupNameParam, vmNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginPerformMaintenance = &respr + v.beginPerformMaintenance.add(req, beginPerformMaintenance) + } + + resp, err := server.PollerResponderNext(beginPerformMaintenance, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginPerformMaintenance.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginPerformMaintenance) { + v.beginPerformMaintenance.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchBeginPowerOff(req *http.Request) (*http.Response, error) { + if v.srv.BeginPowerOff == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginPowerOff not implemented")} + } + beginPowerOff := v.beginPowerOff.get(req) + if beginPowerOff == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/powerOff` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + skipShutdownUnescaped, err := url.QueryUnescape(qp.Get("skipShutdown")) + if err != nil { + return nil, err + } + skipShutdownParam, err := parseOptional(skipShutdownUnescaped, strconv.ParseBool) + if err != nil { + return nil, err + } + var options *armcompute.VirtualMachinesClientBeginPowerOffOptions + if skipShutdownParam != nil { + options = &armcompute.VirtualMachinesClientBeginPowerOffOptions{ + SkipShutdown: skipShutdownParam, + } + } + respr, errRespr := v.srv.BeginPowerOff(req.Context(), resourceGroupNameParam, vmNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginPowerOff = &respr + v.beginPowerOff.add(req, beginPowerOff) + } + + resp, err := server.PollerResponderNext(beginPowerOff, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginPowerOff.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginPowerOff) { + v.beginPowerOff.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchBeginReapply(req *http.Request) (*http.Response, error) { + if v.srv.BeginReapply == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginReapply not implemented")} + } + beginReapply := v.beginReapply.get(req) + if beginReapply == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/reapply` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginReapply(req.Context(), resourceGroupNameParam, vmNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginReapply = &respr + v.beginReapply.add(req, beginReapply) + } + + resp, err := server.PollerResponderNext(beginReapply, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginReapply.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginReapply) { + v.beginReapply.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchBeginRedeploy(req *http.Request) (*http.Response, error) { + if v.srv.BeginRedeploy == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginRedeploy not implemented")} + } + beginRedeploy := v.beginRedeploy.get(req) + if beginRedeploy == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/redeploy` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginRedeploy(req.Context(), resourceGroupNameParam, vmNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginRedeploy = &respr + v.beginRedeploy.add(req, beginRedeploy) + } + + resp, err := server.PollerResponderNext(beginRedeploy, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginRedeploy.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginRedeploy) { + v.beginRedeploy.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchBeginReimage(req *http.Request) (*http.Response, error) { + if v.srv.BeginReimage == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginReimage not implemented")} + } + beginReimage := v.beginReimage.get(req) + if beginReimage == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/reimage` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineReimageParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + var options *armcompute.VirtualMachinesClientBeginReimageOptions + if !reflect.ValueOf(body).IsZero() { + options = &armcompute.VirtualMachinesClientBeginReimageOptions{ + Parameters: &body, + } + } + respr, errRespr := v.srv.BeginReimage(req.Context(), resourceGroupNameParam, vmNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginReimage = &respr + v.beginReimage.add(req, beginReimage) + } + + resp, err := server.PollerResponderNext(beginReimage, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginReimage.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginReimage) { + v.beginReimage.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchBeginRestart(req *http.Request) (*http.Response, error) { + if v.srv.BeginRestart == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginRestart not implemented")} + } + beginRestart := v.beginRestart.get(req) + if beginRestart == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/restart` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginRestart(req.Context(), resourceGroupNameParam, vmNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginRestart = &respr + v.beginRestart.add(req, beginRestart) + } + + resp, err := server.PollerResponderNext(beginRestart, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginRestart.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginRestart) { + v.beginRestart.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchRetrieveBootDiagnosticsData(req *http.Request) (*http.Response, error) { + if v.srv.RetrieveBootDiagnosticsData == nil { + return nil, &nonRetriableError{errors.New("fake for method RetrieveBootDiagnosticsData not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/retrieveBootDiagnosticsData` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + sasURIExpirationTimeInMinutesUnescaped, err := url.QueryUnescape(qp.Get("sasUriExpirationTimeInMinutes")) + if err != nil { + return nil, err + } + sasURIExpirationTimeInMinutesParam, err := parseOptional(sasURIExpirationTimeInMinutesUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + var options *armcompute.VirtualMachinesClientRetrieveBootDiagnosticsDataOptions + if sasURIExpirationTimeInMinutesParam != nil { + options = &armcompute.VirtualMachinesClientRetrieveBootDiagnosticsDataOptions{ + 
SasURIExpirationTimeInMinutes: sasURIExpirationTimeInMinutesParam, + } + } + respr, errRespr := v.srv.RetrieveBootDiagnosticsData(req.Context(), resourceGroupNameParam, vmNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).RetrieveBootDiagnosticsDataResult, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchBeginRunCommand(req *http.Request) (*http.Response, error) { + if v.srv.BeginRunCommand == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginRunCommand not implemented")} + } + beginRunCommand := v.beginRunCommand.get(req) + if beginRunCommand == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/runCommand` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.RunCommandInput](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginRunCommand(req.Context(), resourceGroupNameParam, vmNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginRunCommand = &respr + v.beginRunCommand.add(req, beginRunCommand) + } + + resp, err := server.PollerResponderNext(beginRunCommand, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginRunCommand.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginRunCommand) { + v.beginRunCommand.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchSimulateEviction(req *http.Request) (*http.Response, error) { + if v.srv.SimulateEviction == nil { + return nil, &nonRetriableError{errors.New("fake for method SimulateEviction not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/simulateEviction` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.SimulateEviction(req.Context(), resourceGroupNameParam, vmNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusNoContent}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusNoContent", respContent.HTTPStatus)} + } + resp, err := server.NewResponse(respContent, req, nil) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchBeginStart(req *http.Request) (*http.Response, error) { + if v.srv.BeginStart == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginStart not implemented")} + } + beginStart := v.beginStart.get(req) + if beginStart == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/start` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginStart(req.Context(), resourceGroupNameParam, vmNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginStart = &respr + v.beginStart.add(req, beginStart) + } + + resp, err := server.PollerResponderNext(beginStart, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginStart.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginStart) { + v.beginStart.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachinesServerTransport) dispatchBeginUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdate not implemented")} + } + beginUpdate := v.beginUpdate.get(req) + if beginUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmName")]) + if err != nil { + return nil, err + } + ifMatchParam := getOptional(getHeaderValue(req.Header, "If-Match")) + ifNoneMatchParam := getOptional(getHeaderValue(req.Header, "If-None-Match")) + var options *armcompute.VirtualMachinesClientBeginUpdateOptions + if ifMatchParam != nil || ifNoneMatchParam != nil { + options = &armcompute.VirtualMachinesClientBeginUpdateOptions{ + IfMatch: ifMatchParam, + IfNoneMatch: ifNoneMatchParam, + } + } + respr, errRespr := v.srv.BeginUpdate(req.Context(), resourceGroupNameParam, vmNameParam, body, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdate = &respr + v.beginUpdate.add(req, beginUpdate) + } + + resp, err := server.PollerResponderNext(beginUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.beginUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdate) { + v.beginUpdate.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachinescalesetextensions_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachinescalesetextensions_server.go new file mode 100644 index 00000000000..e1304ee2edf --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachinescalesetextensions_server.go @@ -0,0 +1,344 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
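
The Begin* dispatchers in the generated virtualmachines_server.go above all follow one long-running-operation pattern: the first request builds an azfake.PollerResponder, stores it in a per-request tracker, and each later poll is answered by server.PollerResponderNext until server.PollerResponderMore reports the poller is drained. Below is a hedged sketch of how a test might populate such a poller for BeginDelete; it assumes the azcore/fake helpers AddNonTerminalResponse and SetTerminalResponse behave as their names suggest, the package name is hypothetical, and nothing in it is part of the vendored code.

package fakevmexample_test // hypothetical test package, for illustration only

import (
	"context"
	"net/http"

	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake"
)

// newDeleteFake returns a VirtualMachinesServer whose BeginDelete poller answers one
// intermediate 202 Accepted before finishing with 200 OK, both of which the generated
// dispatcher treats as acceptable status codes.
func newDeleteFake() fake.VirtualMachinesServer {
	return fake.VirtualMachinesServer{
		BeginDelete: func(ctx context.Context, resourceGroupName, vmName string,
			options *armcompute.VirtualMachinesClientBeginDeleteOptions,
		) (resp azfake.PollerResponder[armcompute.VirtualMachinesClientDeleteResponse], errResp azfake.ErrorResponder) {
			// Assumed azcore/fake helpers: queue a non-terminal poll result, then the terminal one.
			resp.AddNonTerminalResponse(http.StatusAccepted, nil)
			resp.SetTerminalResponse(http.StatusOK, armcompute.VirtualMachinesClientDeleteResponse{}, nil)
			return
		},
	}
}

Only the methods a test actually exercises need to be filled in; any method left nil is answered by the "fake for method ... not implemented" error seen in the dispatchers above.
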
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// VirtualMachineScaleSetExtensionsServer is a fake server for instances of the armcompute.VirtualMachineScaleSetExtensionsClient type. +type VirtualMachineScaleSetExtensionsServer struct { + // BeginCreateOrUpdate is the fake for method VirtualMachineScaleSetExtensionsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmssExtensionName string, extensionParameters armcompute.VirtualMachineScaleSetExtension, options *armcompute.VirtualMachineScaleSetExtensionsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetExtensionsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VirtualMachineScaleSetExtensionsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmssExtensionName string, options *armcompute.VirtualMachineScaleSetExtensionsClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetExtensionsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VirtualMachineScaleSetExtensionsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmssExtensionName string, options *armcompute.VirtualMachineScaleSetExtensionsClientGetOptions) (resp azfake.Responder[armcompute.VirtualMachineScaleSetExtensionsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method VirtualMachineScaleSetExtensionsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetExtensionsClientListOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetExtensionsClientListResponse]) + + // BeginUpdate is the fake for method VirtualMachineScaleSetExtensionsClient.BeginUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginUpdate func(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmssExtensionName string, extensionParameters armcompute.VirtualMachineScaleSetExtensionUpdate, options *armcompute.VirtualMachineScaleSetExtensionsClientBeginUpdateOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetExtensionsClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewVirtualMachineScaleSetExtensionsServerTransport creates a new instance of VirtualMachineScaleSetExtensionsServerTransport with the provided implementation. +// The returned VirtualMachineScaleSetExtensionsServerTransport instance is connected to an instance of armcompute.VirtualMachineScaleSetExtensionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
+func NewVirtualMachineScaleSetExtensionsServerTransport(srv *VirtualMachineScaleSetExtensionsServer) *VirtualMachineScaleSetExtensionsServerTransport { + return &VirtualMachineScaleSetExtensionsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetExtensionsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetExtensionsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armcompute.VirtualMachineScaleSetExtensionsClientListResponse]](), + beginUpdate: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetExtensionsClientUpdateResponse]](), + } +} + +// VirtualMachineScaleSetExtensionsServerTransport connects instances of armcompute.VirtualMachineScaleSetExtensionsClient to instances of VirtualMachineScaleSetExtensionsServer. +// Don't use this type directly, use NewVirtualMachineScaleSetExtensionsServerTransport instead. +type VirtualMachineScaleSetExtensionsServerTransport struct { + srv *VirtualMachineScaleSetExtensionsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetExtensionsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetExtensionsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armcompute.VirtualMachineScaleSetExtensionsClientListResponse]] + beginUpdate *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetExtensionsClientUpdateResponse]] +} + +// Do implements the policy.Transporter interface for VirtualMachineScaleSetExtensionsServerTransport. +func (v *VirtualMachineScaleSetExtensionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualMachineScaleSetExtensionsClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VirtualMachineScaleSetExtensionsClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VirtualMachineScaleSetExtensionsClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualMachineScaleSetExtensionsClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + case "VirtualMachineScaleSetExtensionsClient.BeginUpdate": + resp, err = v.dispatchBeginUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetExtensionsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/extensions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := 
server.UnmarshalRequestAsJSON[armcompute.VirtualMachineScaleSetExtension](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + vmssExtensionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmssExtensionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, vmssExtensionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetExtensionsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/extensions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + vmssExtensionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmssExtensionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, vmssExtensionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetExtensionsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/extensions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + vmssExtensionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmssExtensionName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armcompute.VirtualMachineScaleSetExtensionsClientGetOptions + if expandParam != nil { + options = &armcompute.VirtualMachineScaleSetExtensionsClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, vmssExtensionNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineScaleSetExtension, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineScaleSetExtensionsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := v.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/extensions` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListPager(resourceGroupNameParam, vmScaleSetNameParam, nil) + newListPager = &resp + v.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armcompute.VirtualMachineScaleSetExtensionsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + v.newListPager.remove(req) + } + return resp, nil +} + +func (v *VirtualMachineScaleSetExtensionsServerTransport) dispatchBeginUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdate not implemented")} + } + beginUpdate := v.beginUpdate.get(req) + if beginUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/extensions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineScaleSetExtensionUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + vmssExtensionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmssExtensionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginUpdate(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, vmssExtensionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdate = &respr + v.beginUpdate.add(req, beginUpdate) + } + + resp, err := server.PollerResponderNext(beginUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdate) { + v.beginUpdate.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachinescalesetrollingupgrades_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachinescalesetrollingupgrades_server.go new file mode 100644 index 00000000000..8648eb30d99 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachinescalesetrollingupgrades_server.go @@ -0,0 +1,258 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// VirtualMachineScaleSetRollingUpgradesServer is a fake server for instances of the armcompute.VirtualMachineScaleSetRollingUpgradesClient type. +type VirtualMachineScaleSetRollingUpgradesServer struct { + // BeginCancel is the fake for method VirtualMachineScaleSetRollingUpgradesClient.BeginCancel + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginCancel func(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetRollingUpgradesClientBeginCancelOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetRollingUpgradesClientCancelResponse], errResp azfake.ErrorResponder) + + // GetLatest is the fake for method VirtualMachineScaleSetRollingUpgradesClient.GetLatest + // HTTP status codes to indicate success: http.StatusOK + GetLatest func(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetRollingUpgradesClientGetLatestOptions) (resp azfake.Responder[armcompute.VirtualMachineScaleSetRollingUpgradesClientGetLatestResponse], errResp azfake.ErrorResponder) + + // BeginStartExtensionUpgrade is the fake for method VirtualMachineScaleSetRollingUpgradesClient.BeginStartExtensionUpgrade + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginStartExtensionUpgrade func(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetRollingUpgradesClientBeginStartExtensionUpgradeOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradeResponse], errResp azfake.ErrorResponder) + + // BeginStartOSUpgrade is the fake for method VirtualMachineScaleSetRollingUpgradesClient.BeginStartOSUpgrade + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginStartOSUpgrade func(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetRollingUpgradesClientBeginStartOSUpgradeOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradeResponse], errResp azfake.ErrorResponder) +} + +// NewVirtualMachineScaleSetRollingUpgradesServerTransport creates a new instance of VirtualMachineScaleSetRollingUpgradesServerTransport with the provided implementation. +// The returned VirtualMachineScaleSetRollingUpgradesServerTransport instance is connected to an instance of armcompute.VirtualMachineScaleSetRollingUpgradesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
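
The constructor comment above describes how these generated fakes are consumed: the *ServerTransport is handed to the real client as its HTTP transport. Below is a minimal sketch of that wiring for the rolling-upgrades fake; it is not part of this diff. It assumes the azcore fake helpers azfake.TokenCredential and Responder.SetResponse behave as documented, that the transport is set via the Transport field of azcore.ClientOptions (the "Transporter" the generated comment refers to), and the package name, test name, subscription ID, resource group, and scale-set names are placeholders.

package fakeexample // hypothetical test package, not part of the vendored diff

import (
	"context"
	"net/http"
	"testing"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake"
)

func TestRollingUpgradesGetLatest(t *testing.T) {
	// Implement only the methods the test needs; anything left nil is reported by
	// the dispatchers in this file as a "not implemented" nonRetriableError.
	srv := fake.VirtualMachineScaleSetRollingUpgradesServer{
		GetLatest: func(ctx context.Context, resourceGroupName, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetRollingUpgradesClientGetLatestOptions) (resp azfake.Responder[armcompute.VirtualMachineScaleSetRollingUpgradesClientGetLatestResponse], errResp azfake.ErrorResponder) {
			// SetResponse is assumed from the azcore fake package; StatusOK matches the
			// success code documented for GetLatest above.
			resp.SetResponse(http.StatusOK, armcompute.VirtualMachineScaleSetRollingUpgradesClientGetLatestResponse{}, nil)
			return
		},
	}
	// Route the client through the fake transport instead of the real ARM endpoint.
	client, err := armcompute.NewVirtualMachineScaleSetRollingUpgradesClient("00000000-0000-0000-0000-000000000000",
		&azfake.TokenCredential{}, &arm.ClientOptions{
			ClientOptions: azcore.ClientOptions{
				Transport: fake.NewVirtualMachineScaleSetRollingUpgradesServerTransport(&srv),
			},
		})
	if err != nil {
		t.Fatal(err)
	}
	if _, err := client.GetLatest(context.Background(), "example-rg", "example-vmss", nil); err != nil {
		t.Fatal(err)
	}
}
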
+func NewVirtualMachineScaleSetRollingUpgradesServerTransport(srv *VirtualMachineScaleSetRollingUpgradesServer) *VirtualMachineScaleSetRollingUpgradesServerTransport { + return &VirtualMachineScaleSetRollingUpgradesServerTransport{ + srv: srv, + beginCancel: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetRollingUpgradesClientCancelResponse]](), + beginStartExtensionUpgrade: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradeResponse]](), + beginStartOSUpgrade: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradeResponse]](), + } +} + +// VirtualMachineScaleSetRollingUpgradesServerTransport connects instances of armcompute.VirtualMachineScaleSetRollingUpgradesClient to instances of VirtualMachineScaleSetRollingUpgradesServer. +// Don't use this type directly, use NewVirtualMachineScaleSetRollingUpgradesServerTransport instead. +type VirtualMachineScaleSetRollingUpgradesServerTransport struct { + srv *VirtualMachineScaleSetRollingUpgradesServer + beginCancel *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetRollingUpgradesClientCancelResponse]] + beginStartExtensionUpgrade *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradeResponse]] + beginStartOSUpgrade *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradeResponse]] +} + +// Do implements the policy.Transporter interface for VirtualMachineScaleSetRollingUpgradesServerTransport. +func (v *VirtualMachineScaleSetRollingUpgradesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualMachineScaleSetRollingUpgradesClient.BeginCancel": + resp, err = v.dispatchBeginCancel(req) + case "VirtualMachineScaleSetRollingUpgradesClient.GetLatest": + resp, err = v.dispatchGetLatest(req) + case "VirtualMachineScaleSetRollingUpgradesClient.BeginStartExtensionUpgrade": + resp, err = v.dispatchBeginStartExtensionUpgrade(req) + case "VirtualMachineScaleSetRollingUpgradesClient.BeginStartOSUpgrade": + resp, err = v.dispatchBeginStartOSUpgrade(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetRollingUpgradesServerTransport) dispatchBeginCancel(req *http.Request) (*http.Response, error) { + if v.srv.BeginCancel == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCancel not implemented")} + } + beginCancel := v.beginCancel.get(req) + if beginCancel == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/rollingUpgrades/cancel` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := 
url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCancel(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCancel = &respr + v.beginCancel.add(req, beginCancel) + } + + resp, err := server.PollerResponderNext(beginCancel, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginCancel.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCancel) { + v.beginCancel.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetRollingUpgradesServerTransport) dispatchGetLatest(req *http.Request) (*http.Response, error) { + if v.srv.GetLatest == nil { + return nil, &nonRetriableError{errors.New("fake for method GetLatest not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/rollingUpgrades/latest` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.GetLatest(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).RollingUpgradeStatusInfo, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineScaleSetRollingUpgradesServerTransport) dispatchBeginStartExtensionUpgrade(req *http.Request) (*http.Response, error) { + if v.srv.BeginStartExtensionUpgrade == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginStartExtensionUpgrade not implemented")} + } + beginStartExtensionUpgrade := v.beginStartExtensionUpgrade.get(req) + if beginStartExtensionUpgrade == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/extensionRollingUpgrade` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginStartExtensionUpgrade(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginStartExtensionUpgrade = &respr + v.beginStartExtensionUpgrade.add(req, beginStartExtensionUpgrade) + } + + resp, err := server.PollerResponderNext(beginStartExtensionUpgrade, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginStartExtensionUpgrade.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginStartExtensionUpgrade) { + v.beginStartExtensionUpgrade.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetRollingUpgradesServerTransport) dispatchBeginStartOSUpgrade(req *http.Request) (*http.Response, error) { + if v.srv.BeginStartOSUpgrade == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginStartOSUpgrade not implemented")} + } + beginStartOSUpgrade := v.beginStartOSUpgrade.get(req) + if beginStartOSUpgrade == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/osRollingUpgrade` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginStartOSUpgrade(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginStartOSUpgrade = &respr + v.beginStartOSUpgrade.add(req, beginStartOSUpgrade) + } + + resp, err := server.PollerResponderNext(beginStartOSUpgrade, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginStartOSUpgrade.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginStartOSUpgrade) { + v.beginStartOSUpgrade.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachinescalesets_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachinescalesets_server.go new file mode 100644 index 00000000000..9aa0a97db1b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachinescalesets_server.go @@ -0,0 +1,1509 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "reflect" + "regexp" + "strconv" +) + +// VirtualMachineScaleSetsServer is a fake server for instances of the armcompute.VirtualMachineScaleSetsClient type. 
+type VirtualMachineScaleSetsServer struct { + // BeginApproveRollingUpgrade is the fake for method VirtualMachineScaleSetsClient.BeginApproveRollingUpgrade + // HTTP status codes to indicate success: http.StatusAccepted + BeginApproveRollingUpgrade func(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetsClientBeginApproveRollingUpgradeOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientApproveRollingUpgradeResponse], errResp azfake.ErrorResponder) + + // ConvertToSinglePlacementGroup is the fake for method VirtualMachineScaleSetsClient.ConvertToSinglePlacementGroup + // HTTP status codes to indicate success: http.StatusOK + ConvertToSinglePlacementGroup func(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters armcompute.VMScaleSetConvertToSinglePlacementGroupInput, options *armcompute.VirtualMachineScaleSetsClientConvertToSinglePlacementGroupOptions) (resp azfake.Responder[armcompute.VirtualMachineScaleSetsClientConvertToSinglePlacementGroupResponse], errResp azfake.ErrorResponder) + + // BeginCreateOrUpdate is the fake for method VirtualMachineScaleSetsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters armcompute.VirtualMachineScaleSet, options *armcompute.VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDeallocate is the fake for method VirtualMachineScaleSetsClient.BeginDeallocate + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginDeallocate func(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetsClientBeginDeallocateOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientDeallocateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VirtualMachineScaleSetsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetsClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientDeleteResponse], errResp azfake.ErrorResponder) + + // BeginDeleteInstances is the fake for method VirtualMachineScaleSetsClient.BeginDeleteInstances + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginDeleteInstances func(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmInstanceIDs armcompute.VirtualMachineScaleSetVMInstanceRequiredIDs, options *armcompute.VirtualMachineScaleSetsClientBeginDeleteInstancesOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientDeleteInstancesResponse], errResp azfake.ErrorResponder) + + // ForceRecoveryServiceFabricPlatformUpdateDomainWalk is the fake for method VirtualMachineScaleSetsClient.ForceRecoveryServiceFabricPlatformUpdateDomainWalk + // HTTP status codes to indicate success: http.StatusOK + ForceRecoveryServiceFabricPlatformUpdateDomainWalk func(ctx context.Context, resourceGroupName string, vmScaleSetName string, platformUpdateDomain int32, options 
*armcompute.VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkOptions) (resp azfake.Responder[armcompute.VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VirtualMachineScaleSetsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetsClientGetOptions) (resp azfake.Responder[armcompute.VirtualMachineScaleSetsClientGetResponse], errResp azfake.ErrorResponder) + + // GetInstanceView is the fake for method VirtualMachineScaleSetsClient.GetInstanceView + // HTTP status codes to indicate success: http.StatusOK + GetInstanceView func(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetsClientGetInstanceViewOptions) (resp azfake.Responder[armcompute.VirtualMachineScaleSetsClientGetInstanceViewResponse], errResp azfake.ErrorResponder) + + // NewGetOSUpgradeHistoryPager is the fake for method VirtualMachineScaleSetsClient.NewGetOSUpgradeHistoryPager + // HTTP status codes to indicate success: http.StatusOK + NewGetOSUpgradeHistoryPager func(resourceGroupName string, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetsClientGetOSUpgradeHistoryOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetsClientGetOSUpgradeHistoryResponse]) + + // NewListPager is the fake for method VirtualMachineScaleSetsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armcompute.VirtualMachineScaleSetsClientListOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetsClientListResponse]) + + // NewListAllPager is the fake for method VirtualMachineScaleSetsClient.NewListAllPager + // HTTP status codes to indicate success: http.StatusOK + NewListAllPager func(options *armcompute.VirtualMachineScaleSetsClientListAllOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetsClientListAllResponse]) + + // NewListByLocationPager is the fake for method VirtualMachineScaleSetsClient.NewListByLocationPager + // HTTP status codes to indicate success: http.StatusOK + NewListByLocationPager func(location string, options *armcompute.VirtualMachineScaleSetsClientListByLocationOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetsClientListByLocationResponse]) + + // NewListSKUsPager is the fake for method VirtualMachineScaleSetsClient.NewListSKUsPager + // HTTP status codes to indicate success: http.StatusOK + NewListSKUsPager func(resourceGroupName string, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetsClientListSKUsOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetsClientListSKUsResponse]) + + // BeginPerformMaintenance is the fake for method VirtualMachineScaleSetsClient.BeginPerformMaintenance + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginPerformMaintenance func(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetsClientBeginPerformMaintenanceOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientPerformMaintenanceResponse], errResp azfake.ErrorResponder) + + // BeginPowerOff is the fake for method VirtualMachineScaleSetsClient.BeginPowerOff + // HTTP status codes to indicate success: 
http.StatusOK, http.StatusAccepted + BeginPowerOff func(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetsClientBeginPowerOffOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientPowerOffResponse], errResp azfake.ErrorResponder) + + // BeginReapply is the fake for method VirtualMachineScaleSetsClient.BeginReapply + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginReapply func(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetsClientBeginReapplyOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientReapplyResponse], errResp azfake.ErrorResponder) + + // BeginRedeploy is the fake for method VirtualMachineScaleSetsClient.BeginRedeploy + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginRedeploy func(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetsClientBeginRedeployOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientRedeployResponse], errResp azfake.ErrorResponder) + + // BeginReimage is the fake for method VirtualMachineScaleSetsClient.BeginReimage + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginReimage func(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetsClientBeginReimageOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientReimageResponse], errResp azfake.ErrorResponder) + + // BeginReimageAll is the fake for method VirtualMachineScaleSetsClient.BeginReimageAll + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginReimageAll func(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetsClientBeginReimageAllOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientReimageAllResponse], errResp azfake.ErrorResponder) + + // BeginRestart is the fake for method VirtualMachineScaleSetsClient.BeginRestart + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginRestart func(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetsClientBeginRestartOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientRestartResponse], errResp azfake.ErrorResponder) + + // BeginSetOrchestrationServiceState is the fake for method VirtualMachineScaleSetsClient.BeginSetOrchestrationServiceState + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginSetOrchestrationServiceState func(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters armcompute.OrchestrationServiceStateInput, options *armcompute.VirtualMachineScaleSetsClientBeginSetOrchestrationServiceStateOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientSetOrchestrationServiceStateResponse], errResp azfake.ErrorResponder) + + // BeginStart is the fake for method VirtualMachineScaleSetsClient.BeginStart + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginStart func(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetsClientBeginStartOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientStartResponse], 
errResp azfake.ErrorResponder) + + // BeginUpdate is the fake for method VirtualMachineScaleSetsClient.BeginUpdate + // HTTP status codes to indicate success: http.StatusOK + BeginUpdate func(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters armcompute.VirtualMachineScaleSetUpdate, options *armcompute.VirtualMachineScaleSetsClientBeginUpdateOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientUpdateResponse], errResp azfake.ErrorResponder) + + // BeginUpdateInstances is the fake for method VirtualMachineScaleSetsClient.BeginUpdateInstances + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginUpdateInstances func(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmInstanceIDs armcompute.VirtualMachineScaleSetVMInstanceRequiredIDs, options *armcompute.VirtualMachineScaleSetsClientBeginUpdateInstancesOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientUpdateInstancesResponse], errResp azfake.ErrorResponder) +} + +// NewVirtualMachineScaleSetsServerTransport creates a new instance of VirtualMachineScaleSetsServerTransport with the provided implementation. +// The returned VirtualMachineScaleSetsServerTransport instance is connected to an instance of armcompute.VirtualMachineScaleSetsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVirtualMachineScaleSetsServerTransport(srv *VirtualMachineScaleSetsServer) *VirtualMachineScaleSetsServerTransport { + return &VirtualMachineScaleSetsServerTransport{ + srv: srv, + beginApproveRollingUpgrade: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientApproveRollingUpgradeResponse]](), + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientCreateOrUpdateResponse]](), + beginDeallocate: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientDeallocateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientDeleteResponse]](), + beginDeleteInstances: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientDeleteInstancesResponse]](), + newGetOSUpgradeHistoryPager: newTracker[azfake.PagerResponder[armcompute.VirtualMachineScaleSetsClientGetOSUpgradeHistoryResponse]](), + newListPager: newTracker[azfake.PagerResponder[armcompute.VirtualMachineScaleSetsClientListResponse]](), + newListAllPager: newTracker[azfake.PagerResponder[armcompute.VirtualMachineScaleSetsClientListAllResponse]](), + newListByLocationPager: newTracker[azfake.PagerResponder[armcompute.VirtualMachineScaleSetsClientListByLocationResponse]](), + newListSKUsPager: newTracker[azfake.PagerResponder[armcompute.VirtualMachineScaleSetsClientListSKUsResponse]](), + beginPerformMaintenance: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientPerformMaintenanceResponse]](), + beginPowerOff: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientPowerOffResponse]](), + beginReapply: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientReapplyResponse]](), + beginRedeploy: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientRedeployResponse]](), + beginReimage: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientReimageResponse]](), + beginReimageAll: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientReimageAllResponse]](), + beginRestart: 
newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientRestartResponse]](), + beginSetOrchestrationServiceState: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientSetOrchestrationServiceStateResponse]](), + beginStart: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientStartResponse]](), + beginUpdate: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientUpdateResponse]](), + beginUpdateInstances: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientUpdateInstancesResponse]](), + } +} + +// VirtualMachineScaleSetsServerTransport connects instances of armcompute.VirtualMachineScaleSetsClient to instances of VirtualMachineScaleSetsServer. +// Don't use this type directly, use NewVirtualMachineScaleSetsServerTransport instead. +type VirtualMachineScaleSetsServerTransport struct { + srv *VirtualMachineScaleSetsServer + beginApproveRollingUpgrade *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientApproveRollingUpgradeResponse]] + beginCreateOrUpdate *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientCreateOrUpdateResponse]] + beginDeallocate *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientDeallocateResponse]] + beginDelete *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientDeleteResponse]] + beginDeleteInstances *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientDeleteInstancesResponse]] + newGetOSUpgradeHistoryPager *tracker[azfake.PagerResponder[armcompute.VirtualMachineScaleSetsClientGetOSUpgradeHistoryResponse]] + newListPager *tracker[azfake.PagerResponder[armcompute.VirtualMachineScaleSetsClientListResponse]] + newListAllPager *tracker[azfake.PagerResponder[armcompute.VirtualMachineScaleSetsClientListAllResponse]] + newListByLocationPager *tracker[azfake.PagerResponder[armcompute.VirtualMachineScaleSetsClientListByLocationResponse]] + newListSKUsPager *tracker[azfake.PagerResponder[armcompute.VirtualMachineScaleSetsClientListSKUsResponse]] + beginPerformMaintenance *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientPerformMaintenanceResponse]] + beginPowerOff *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientPowerOffResponse]] + beginReapply *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientReapplyResponse]] + beginRedeploy *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientRedeployResponse]] + beginReimage *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientReimageResponse]] + beginReimageAll *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientReimageAllResponse]] + beginRestart *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientRestartResponse]] + beginSetOrchestrationServiceState *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientSetOrchestrationServiceStateResponse]] + beginStart *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientStartResponse]] + beginUpdate *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientUpdateResponse]] + beginUpdateInstances *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientUpdateInstancesResponse]] +} + +// Do implements the policy.Transporter interface for VirtualMachineScaleSetsServerTransport. 
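
Most of the scale-set operations above are long-running, so their fakes return azfake.PollerResponder values that this transport replays one response at a time via server.PollerResponderNext until the terminal response is reached. A minimal sketch of driving one such poller end to end follows; it is not part of this diff, shares the hypothetical package and imports of the rolling-upgrades sketch earlier, and assumes the PollerResponder helpers AddNonTerminalResponse and SetTerminalResponse plus azfake.TokenCredential behave as documented. All resource names are placeholders.

func TestScaleSetsBeginDelete(t *testing.T) {
	srv := fake.VirtualMachineScaleSetsServer{
		BeginDelete: func(ctx context.Context, resourceGroupName, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetsClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetsClientDeleteResponse], errResp azfake.ErrorResponder) {
			// Two in-progress polls before the terminal 200; both 202 and 200 are in the
			// acceptable-status set checked by dispatchBeginDelete further down this file.
			resp.AddNonTerminalResponse(http.StatusAccepted, nil)
			resp.AddNonTerminalResponse(http.StatusAccepted, nil)
			resp.SetTerminalResponse(http.StatusOK, armcompute.VirtualMachineScaleSetsClientDeleteResponse{}, nil)
			return
		},
	}
	client, err := armcompute.NewVirtualMachineScaleSetsClient("00000000-0000-0000-0000-000000000000",
		&azfake.TokenCredential{}, &arm.ClientOptions{
			ClientOptions: azcore.ClientOptions{
				Transport: fake.NewVirtualMachineScaleSetsServerTransport(&srv),
			},
		})
	if err != nil {
		t.Fatal(err)
	}
	poller, err := client.BeginDelete(context.Background(), "example-rg", "example-vmss", nil)
	if err != nil {
		t.Fatal(err)
	}
	// PollUntilDone keeps hitting the fake transport until PollerResponderMore reports no further responses.
	if _, err := poller.PollUntilDone(context.Background(), nil); err != nil {
		t.Fatal(err)
	}
}
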
+func (v *VirtualMachineScaleSetsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualMachineScaleSetsClient.BeginApproveRollingUpgrade": + resp, err = v.dispatchBeginApproveRollingUpgrade(req) + case "VirtualMachineScaleSetsClient.ConvertToSinglePlacementGroup": + resp, err = v.dispatchConvertToSinglePlacementGroup(req) + case "VirtualMachineScaleSetsClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VirtualMachineScaleSetsClient.BeginDeallocate": + resp, err = v.dispatchBeginDeallocate(req) + case "VirtualMachineScaleSetsClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VirtualMachineScaleSetsClient.BeginDeleteInstances": + resp, err = v.dispatchBeginDeleteInstances(req) + case "VirtualMachineScaleSetsClient.ForceRecoveryServiceFabricPlatformUpdateDomainWalk": + resp, err = v.dispatchForceRecoveryServiceFabricPlatformUpdateDomainWalk(req) + case "VirtualMachineScaleSetsClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualMachineScaleSetsClient.GetInstanceView": + resp, err = v.dispatchGetInstanceView(req) + case "VirtualMachineScaleSetsClient.NewGetOSUpgradeHistoryPager": + resp, err = v.dispatchNewGetOSUpgradeHistoryPager(req) + case "VirtualMachineScaleSetsClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + case "VirtualMachineScaleSetsClient.NewListAllPager": + resp, err = v.dispatchNewListAllPager(req) + case "VirtualMachineScaleSetsClient.NewListByLocationPager": + resp, err = v.dispatchNewListByLocationPager(req) + case "VirtualMachineScaleSetsClient.NewListSKUsPager": + resp, err = v.dispatchNewListSKUsPager(req) + case "VirtualMachineScaleSetsClient.BeginPerformMaintenance": + resp, err = v.dispatchBeginPerformMaintenance(req) + case "VirtualMachineScaleSetsClient.BeginPowerOff": + resp, err = v.dispatchBeginPowerOff(req) + case "VirtualMachineScaleSetsClient.BeginReapply": + resp, err = v.dispatchBeginReapply(req) + case "VirtualMachineScaleSetsClient.BeginRedeploy": + resp, err = v.dispatchBeginRedeploy(req) + case "VirtualMachineScaleSetsClient.BeginReimage": + resp, err = v.dispatchBeginReimage(req) + case "VirtualMachineScaleSetsClient.BeginReimageAll": + resp, err = v.dispatchBeginReimageAll(req) + case "VirtualMachineScaleSetsClient.BeginRestart": + resp, err = v.dispatchBeginRestart(req) + case "VirtualMachineScaleSetsClient.BeginSetOrchestrationServiceState": + resp, err = v.dispatchBeginSetOrchestrationServiceState(req) + case "VirtualMachineScaleSetsClient.BeginStart": + resp, err = v.dispatchBeginStart(req) + case "VirtualMachineScaleSetsClient.BeginUpdate": + resp, err = v.dispatchBeginUpdate(req) + case "VirtualMachineScaleSetsClient.BeginUpdateInstances": + resp, err = v.dispatchBeginUpdateInstances(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchBeginApproveRollingUpgrade(req *http.Request) (*http.Response, error) { + if v.srv.BeginApproveRollingUpgrade == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginApproveRollingUpgrade not implemented")} + } + beginApproveRollingUpgrade := v.beginApproveRollingUpgrade.get(req) + if 
beginApproveRollingUpgrade == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/approveRollingUpgrade` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineScaleSetVMInstanceIDs](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + var options *armcompute.VirtualMachineScaleSetsClientBeginApproveRollingUpgradeOptions + if !reflect.ValueOf(body).IsZero() { + options = &armcompute.VirtualMachineScaleSetsClientBeginApproveRollingUpgradeOptions{ + VMInstanceIDs: &body, + } + } + respr, errRespr := v.srv.BeginApproveRollingUpgrade(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginApproveRollingUpgrade = &respr + v.beginApproveRollingUpgrade.add(req, beginApproveRollingUpgrade) + } + + resp, err := server.PollerResponderNext(beginApproveRollingUpgrade, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusAccepted}, resp.StatusCode) { + v.beginApproveRollingUpgrade.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginApproveRollingUpgrade) { + v.beginApproveRollingUpgrade.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchConvertToSinglePlacementGroup(req *http.Request) (*http.Response, error) { + if v.srv.ConvertToSinglePlacementGroup == nil { + return nil, &nonRetriableError{errors.New("fake for method ConvertToSinglePlacementGroup not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/convertToSinglePlacementGroup` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VMScaleSetConvertToSinglePlacementGroupInput](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.ConvertToSinglePlacementGroup(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.NewResponse(respContent, req, nil) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineScaleSet](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + ifMatchParam := getOptional(getHeaderValue(req.Header, "If-Match")) + ifNoneMatchParam := getOptional(getHeaderValue(req.Header, "If-None-Match")) + var options *armcompute.VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions + if ifMatchParam != nil || ifNoneMatchParam != nil { + options = &armcompute.VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions{ + IfMatch: ifMatchParam, + IfNoneMatch: ifNoneMatchParam, + } + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, body, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchBeginDeallocate(req *http.Request) (*http.Response, error) { + if v.srv.BeginDeallocate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDeallocate not implemented")} + } + beginDeallocate := v.beginDeallocate.get(req) + if beginDeallocate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/deallocate` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineScaleSetVMInstanceIDs](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + hibernateUnescaped, err := url.QueryUnescape(qp.Get("hibernate")) + if err != nil { + return nil, err + } + hibernateParam, err := parseOptional(hibernateUnescaped, strconv.ParseBool) + if err != nil { + return nil, err + } + var options *armcompute.VirtualMachineScaleSetsClientBeginDeallocateOptions + if hibernateParam != nil || !reflect.ValueOf(body).IsZero() { + options = &armcompute.VirtualMachineScaleSetsClientBeginDeallocateOptions{ + Hibernate: hibernateParam, + VMInstanceIDs: &body, + } + } + respr, errRespr := v.srv.BeginDeallocate(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDeallocate = &respr + v.beginDeallocate.add(req, beginDeallocate) + } + + resp, err := server.PollerResponderNext(beginDeallocate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginDeallocate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDeallocate) { + v.beginDeallocate.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + forceDeletionUnescaped, err := url.QueryUnescape(qp.Get("forceDeletion")) + if err != nil { + return nil, err + } + forceDeletionParam, err := parseOptional(forceDeletionUnescaped, strconv.ParseBool) + if err != nil { + return nil, err + } + var options *armcompute.VirtualMachineScaleSetsClientBeginDeleteOptions + if forceDeletionParam != nil { + options = &armcompute.VirtualMachineScaleSetsClientBeginDeleteOptions{ + ForceDeletion: forceDeletionParam, + } + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchBeginDeleteInstances(req *http.Request) (*http.Response, error) { + if v.srv.BeginDeleteInstances == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDeleteInstances not implemented")} + } + beginDeleteInstances := v.beginDeleteInstances.get(req) + if beginDeleteInstances == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/delete` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineScaleSetVMInstanceRequiredIDs](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + forceDeletionUnescaped, err := url.QueryUnescape(qp.Get("forceDeletion")) + if err != nil { + return nil, err + } + forceDeletionParam, err := parseOptional(forceDeletionUnescaped, strconv.ParseBool) + if err != nil { + return nil, err + } + var options *armcompute.VirtualMachineScaleSetsClientBeginDeleteInstancesOptions + if forceDeletionParam != nil { + options = &armcompute.VirtualMachineScaleSetsClientBeginDeleteInstancesOptions{ + ForceDeletion: forceDeletionParam, + } + } + respr, errRespr := v.srv.BeginDeleteInstances(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, body, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDeleteInstances = &respr + v.beginDeleteInstances.add(req, beginDeleteInstances) + } + + resp, err := server.PollerResponderNext(beginDeleteInstances, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginDeleteInstances.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDeleteInstances) { + v.beginDeleteInstances.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchForceRecoveryServiceFabricPlatformUpdateDomainWalk(req *http.Request) (*http.Response, error) { + if v.srv.ForceRecoveryServiceFabricPlatformUpdateDomainWalk == nil { + return nil, &nonRetriableError{errors.New("fake for method ForceRecoveryServiceFabricPlatformUpdateDomainWalk not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/forceRecoveryServiceFabricPlatformUpdateDomainWalk` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + platformUpdateDomainUnescaped, err := url.QueryUnescape(qp.Get("platformUpdateDomain")) + if err != nil { + return nil, err + } + platformUpdateDomainParam, err := parseWithCast(platformUpdateDomainUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + zoneUnescaped, err := url.QueryUnescape(qp.Get("zone")) + if err != nil { + return nil, err + } + zoneParam := getOptional(zoneUnescaped) + placementGroupIDUnescaped, err := url.QueryUnescape(qp.Get("placementGroupId")) + if err != nil { + return nil, err + } + placementGroupIDParam := getOptional(placementGroupIDUnescaped) + var options *armcompute.VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkOptions + if zoneParam != nil || placementGroupIDParam != nil { + options = &armcompute.VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkOptions{ + Zone: zoneParam, + PlacementGroupID: placementGroupIDParam, + } + } + respr, errRespr := v.srv.ForceRecoveryServiceFabricPlatformUpdateDomainWalk(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, platformUpdateDomainParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).RecoveryWalkResponse, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(armcompute.ExpandTypesForGetVMScaleSets(expandUnescaped)) + var options *armcompute.VirtualMachineScaleSetsClientGetOptions + if expandParam != nil { + options = &armcompute.VirtualMachineScaleSetsClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineScaleSet, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchGetInstanceView(req *http.Request) (*http.Response, error) { + if v.srv.GetInstanceView == nil { + return nil, &nonRetriableError{errors.New("fake for method GetInstanceView not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/instanceView` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.GetInstanceView(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineScaleSetInstanceView, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchNewGetOSUpgradeHistoryPager(req *http.Request) (*http.Response, error) { + if v.srv.NewGetOSUpgradeHistoryPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewGetOSUpgradeHistoryPager not implemented")} + } + newGetOSUpgradeHistoryPager := v.newGetOSUpgradeHistoryPager.get(req) + if newGetOSUpgradeHistoryPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/osUpgradeHistory` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewGetOSUpgradeHistoryPager(resourceGroupNameParam, vmScaleSetNameParam, nil) + newGetOSUpgradeHistoryPager = &resp + v.newGetOSUpgradeHistoryPager.add(req, newGetOSUpgradeHistoryPager) + server.PagerResponderInjectNextLinks(newGetOSUpgradeHistoryPager, req, func(page *armcompute.VirtualMachineScaleSetsClientGetOSUpgradeHistoryResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newGetOSUpgradeHistoryPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newGetOSUpgradeHistoryPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newGetOSUpgradeHistoryPager) { + v.newGetOSUpgradeHistoryPager.remove(req) + } + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := v.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + v.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armcompute.VirtualMachineScaleSetsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + v.newListPager.remove(req) + } + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchNewListAllPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListAllPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAllPager not implemented")} + } + newListAllPager := v.newListAllPager.get(req) + if newListAllPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := v.srv.NewListAllPager(nil) + newListAllPager = &resp + v.newListAllPager.add(req, newListAllPager) + server.PagerResponderInjectNextLinks(newListAllPager, req, func(page *armcompute.VirtualMachineScaleSetsClientListAllResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAllPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListAllPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAllPager) { + v.newListAllPager.remove(req) + } + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchNewListByLocationPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListByLocationPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByLocationPager not implemented")} + } + newListByLocationPager := v.newListByLocationPager.get(req) + if newListByLocationPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachineScaleSets` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListByLocationPager(locationParam, nil) + newListByLocationPager = &resp + v.newListByLocationPager.add(req, newListByLocationPager) + server.PagerResponderInjectNextLinks(newListByLocationPager, req, func(page *armcompute.VirtualMachineScaleSetsClientListByLocationResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByLocationPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListByLocationPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByLocationPager) { + v.newListByLocationPager.remove(req) + } + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchNewListSKUsPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListSKUsPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListSKUsPager not implemented")} + } + newListSKUsPager := v.newListSKUsPager.get(req) + if newListSKUsPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/skus` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListSKUsPager(resourceGroupNameParam, vmScaleSetNameParam, nil) + newListSKUsPager = &resp + v.newListSKUsPager.add(req, newListSKUsPager) + server.PagerResponderInjectNextLinks(newListSKUsPager, req, func(page *armcompute.VirtualMachineScaleSetsClientListSKUsResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListSKUsPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListSKUsPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListSKUsPager) { + v.newListSKUsPager.remove(req) + } + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchBeginPerformMaintenance(req *http.Request) (*http.Response, error) { + if v.srv.BeginPerformMaintenance == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginPerformMaintenance not implemented")} + } + beginPerformMaintenance := v.beginPerformMaintenance.get(req) + if beginPerformMaintenance == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/performMaintenance` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineScaleSetVMInstanceIDs](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + var options *armcompute.VirtualMachineScaleSetsClientBeginPerformMaintenanceOptions + if !reflect.ValueOf(body).IsZero() { + options = &armcompute.VirtualMachineScaleSetsClientBeginPerformMaintenanceOptions{ + VMInstanceIDs: &body, + } + } + respr, errRespr := v.srv.BeginPerformMaintenance(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginPerformMaintenance = &respr + v.beginPerformMaintenance.add(req, beginPerformMaintenance) + } + + resp, err := server.PollerResponderNext(beginPerformMaintenance, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginPerformMaintenance.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginPerformMaintenance) { + v.beginPerformMaintenance.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchBeginPowerOff(req *http.Request) (*http.Response, error) { + if v.srv.BeginPowerOff == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginPowerOff not implemented")} + } + beginPowerOff := v.beginPowerOff.get(req) + if beginPowerOff == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/poweroff` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineScaleSetVMInstanceIDs](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + skipShutdownUnescaped, err := url.QueryUnescape(qp.Get("skipShutdown")) + if err != nil { + return nil, err + } + skipShutdownParam, err := parseOptional(skipShutdownUnescaped, strconv.ParseBool) + if err != nil { + return nil, err + } + var options *armcompute.VirtualMachineScaleSetsClientBeginPowerOffOptions + if skipShutdownParam != nil || !reflect.ValueOf(body).IsZero() { + options = &armcompute.VirtualMachineScaleSetsClientBeginPowerOffOptions{ + SkipShutdown: skipShutdownParam, + VMInstanceIDs: &body, + } + } + respr, errRespr := v.srv.BeginPowerOff(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginPowerOff = &respr + v.beginPowerOff.add(req, beginPowerOff) + } + + resp, err := server.PollerResponderNext(beginPowerOff, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginPowerOff.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginPowerOff) { + v.beginPowerOff.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchBeginReapply(req *http.Request) (*http.Response, error) { + if v.srv.BeginReapply == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginReapply not implemented")} + } + beginReapply := v.beginReapply.get(req) + if beginReapply == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/reapply` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginReapply(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginReapply = &respr + v.beginReapply.add(req, beginReapply) + } + + resp, err := server.PollerResponderNext(beginReapply, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginReapply.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginReapply) { + v.beginReapply.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchBeginRedeploy(req *http.Request) (*http.Response, error) { + if v.srv.BeginRedeploy == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginRedeploy not implemented")} + } + beginRedeploy := v.beginRedeploy.get(req) + if beginRedeploy == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/redeploy` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineScaleSetVMInstanceIDs](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + var options *armcompute.VirtualMachineScaleSetsClientBeginRedeployOptions + if !reflect.ValueOf(body).IsZero() { + options = &armcompute.VirtualMachineScaleSetsClientBeginRedeployOptions{ + VMInstanceIDs: &body, + } + } + respr, errRespr := v.srv.BeginRedeploy(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginRedeploy = &respr + 
v.beginRedeploy.add(req, beginRedeploy) + } + + resp, err := server.PollerResponderNext(beginRedeploy, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginRedeploy.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginRedeploy) { + v.beginRedeploy.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchBeginReimage(req *http.Request) (*http.Response, error) { + if v.srv.BeginReimage == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginReimage not implemented")} + } + beginReimage := v.beginReimage.get(req) + if beginReimage == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/reimage` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineScaleSetReimageParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + var options *armcompute.VirtualMachineScaleSetsClientBeginReimageOptions + if !reflect.ValueOf(body).IsZero() { + options = &armcompute.VirtualMachineScaleSetsClientBeginReimageOptions{ + VMScaleSetReimageInput: &body, + } + } + respr, errRespr := v.srv.BeginReimage(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginReimage = &respr + v.beginReimage.add(req, beginReimage) + } + + resp, err := server.PollerResponderNext(beginReimage, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginReimage.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginReimage) { + v.beginReimage.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchBeginReimageAll(req *http.Request) (*http.Response, error) { + if v.srv.BeginReimageAll == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginReimageAll not implemented")} + } + beginReimageAll := v.beginReimageAll.get(req) + if beginReimageAll == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/reimageall` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineScaleSetVMInstanceIDs](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + var options *armcompute.VirtualMachineScaleSetsClientBeginReimageAllOptions + if !reflect.ValueOf(body).IsZero() { + options = &armcompute.VirtualMachineScaleSetsClientBeginReimageAllOptions{ + VMInstanceIDs: &body, + } + } + respr, errRespr := v.srv.BeginReimageAll(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginReimageAll = &respr + v.beginReimageAll.add(req, beginReimageAll) + } + + resp, err := server.PollerResponderNext(beginReimageAll, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginReimageAll.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginReimageAll) { + v.beginReimageAll.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchBeginRestart(req *http.Request) (*http.Response, error) { + if v.srv.BeginRestart == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginRestart not implemented")} + } + beginRestart := v.beginRestart.get(req) + if beginRestart == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/restart` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineScaleSetVMInstanceIDs](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + var options *armcompute.VirtualMachineScaleSetsClientBeginRestartOptions + if !reflect.ValueOf(body).IsZero() { + options = &armcompute.VirtualMachineScaleSetsClientBeginRestartOptions{ + VMInstanceIDs: &body, + } + } + respr, errRespr := v.srv.BeginRestart(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginRestart = &respr + v.beginRestart.add(req, beginRestart) + } + + resp, err := server.PollerResponderNext(beginRestart, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginRestart.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginRestart) { + v.beginRestart.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchBeginSetOrchestrationServiceState(req *http.Request) (*http.Response, error) { + if v.srv.BeginSetOrchestrationServiceState == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginSetOrchestrationServiceState not implemented")} + } + beginSetOrchestrationServiceState := v.beginSetOrchestrationServiceState.get(req) + if beginSetOrchestrationServiceState == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/setOrchestrationServiceState` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.OrchestrationServiceStateInput](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginSetOrchestrationServiceState(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginSetOrchestrationServiceState = &respr + v.beginSetOrchestrationServiceState.add(req, beginSetOrchestrationServiceState) + } + + resp, err := server.PollerResponderNext(beginSetOrchestrationServiceState, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginSetOrchestrationServiceState.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginSetOrchestrationServiceState) { + v.beginSetOrchestrationServiceState.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchBeginStart(req *http.Request) (*http.Response, error) { + if v.srv.BeginStart == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginStart not implemented")} + } + beginStart := v.beginStart.get(req) + if beginStart == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/start` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineScaleSetVMInstanceIDs](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + var options *armcompute.VirtualMachineScaleSetsClientBeginStartOptions + if !reflect.ValueOf(body).IsZero() { + options = &armcompute.VirtualMachineScaleSetsClientBeginStartOptions{ + VMInstanceIDs: &body, + } + } + respr, errRespr := v.srv.BeginStart(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginStart = &respr + v.beginStart.add(req, beginStart) + } + + resp, err := server.PollerResponderNext(beginStart, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginStart.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginStart) { + v.beginStart.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchBeginUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdate not implemented")} + } + beginUpdate := v.beginUpdate.get(req) + if beginUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineScaleSetUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + ifMatchParam := getOptional(getHeaderValue(req.Header, "If-Match")) + ifNoneMatchParam := getOptional(getHeaderValue(req.Header, "If-None-Match")) + var options *armcompute.VirtualMachineScaleSetsClientBeginUpdateOptions + if ifMatchParam != nil || ifNoneMatchParam != nil { + options = &armcompute.VirtualMachineScaleSetsClientBeginUpdateOptions{ + IfMatch: ifMatchParam, + IfNoneMatch: ifNoneMatchParam, + } + } + respr, errRespr := v.srv.BeginUpdate(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, body, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdate = &respr + v.beginUpdate.add(req, beginUpdate) + } + + resp, err := server.PollerResponderNext(beginUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.beginUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdate) { + v.beginUpdate.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetsServerTransport) dispatchBeginUpdateInstances(req *http.Request) (*http.Response, error) { + if v.srv.BeginUpdateInstances == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdateInstances not implemented")} + } + beginUpdateInstances := v.beginUpdateInstances.get(req) + if beginUpdateInstances == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/manualupgrade` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineScaleSetVMInstanceRequiredIDs](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginUpdateInstances(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdateInstances = &respr + v.beginUpdateInstances.add(req, beginUpdateInstances) + } + + resp, err := server.PollerResponderNext(beginUpdateInstances, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginUpdateInstances.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdateInstances) { + v.beginUpdateInstances.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachinescalesetvmextensions_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachinescalesetvmextensions_server.go new file mode 100644 index 00000000000..94c46980e63 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachinescalesetvmextensions_server.go @@ -0,0 +1,365 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// VirtualMachineScaleSetVMExtensionsServer is a fake server for instances of the armcompute.VirtualMachineScaleSetVMExtensionsClient type. 
+type VirtualMachineScaleSetVMExtensionsServer struct { + // BeginCreateOrUpdate is the fake for method VirtualMachineScaleSetVMExtensionsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, vmExtensionName string, extensionParameters armcompute.VirtualMachineScaleSetVMExtension, options *armcompute.VirtualMachineScaleSetVMExtensionsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMExtensionsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VirtualMachineScaleSetVMExtensionsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, vmExtensionName string, options *armcompute.VirtualMachineScaleSetVMExtensionsClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMExtensionsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VirtualMachineScaleSetVMExtensionsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, vmExtensionName string, options *armcompute.VirtualMachineScaleSetVMExtensionsClientGetOptions) (resp azfake.Responder[armcompute.VirtualMachineScaleSetVMExtensionsClientGetResponse], errResp azfake.ErrorResponder) + + // List is the fake for method VirtualMachineScaleSetVMExtensionsClient.List + // HTTP status codes to indicate success: http.StatusOK + List func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *armcompute.VirtualMachineScaleSetVMExtensionsClientListOptions) (resp azfake.Responder[armcompute.VirtualMachineScaleSetVMExtensionsClientListResponse], errResp azfake.ErrorResponder) + + // BeginUpdate is the fake for method VirtualMachineScaleSetVMExtensionsClient.BeginUpdate + // HTTP status codes to indicate success: http.StatusOK + BeginUpdate func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, vmExtensionName string, extensionParameters armcompute.VirtualMachineScaleSetVMExtensionUpdate, options *armcompute.VirtualMachineScaleSetVMExtensionsClientBeginUpdateOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMExtensionsClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewVirtualMachineScaleSetVMExtensionsServerTransport creates a new instance of VirtualMachineScaleSetVMExtensionsServerTransport with the provided implementation. +// The returned VirtualMachineScaleSetVMExtensionsServerTransport instance is connected to an instance of armcompute.VirtualMachineScaleSetVMExtensionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
+func NewVirtualMachineScaleSetVMExtensionsServerTransport(srv *VirtualMachineScaleSetVMExtensionsServer) *VirtualMachineScaleSetVMExtensionsServerTransport { + return &VirtualMachineScaleSetVMExtensionsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMExtensionsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMExtensionsClientDeleteResponse]](), + beginUpdate: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMExtensionsClientUpdateResponse]](), + } +} + +// VirtualMachineScaleSetVMExtensionsServerTransport connects instances of armcompute.VirtualMachineScaleSetVMExtensionsClient to instances of VirtualMachineScaleSetVMExtensionsServer. +// Don't use this type directly, use NewVirtualMachineScaleSetVMExtensionsServerTransport instead. +type VirtualMachineScaleSetVMExtensionsServerTransport struct { + srv *VirtualMachineScaleSetVMExtensionsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMExtensionsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMExtensionsClientDeleteResponse]] + beginUpdate *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMExtensionsClientUpdateResponse]] +} + +// Do implements the policy.Transporter interface for VirtualMachineScaleSetVMExtensionsServerTransport. +func (v *VirtualMachineScaleSetVMExtensionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualMachineScaleSetVMExtensionsClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VirtualMachineScaleSetVMExtensionsClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VirtualMachineScaleSetVMExtensionsClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualMachineScaleSetVMExtensionsClient.List": + resp, err = v.dispatchList(req) + case "VirtualMachineScaleSetVMExtensionsClient.BeginUpdate": + resp, err = v.dispatchBeginUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetVMExtensionsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/extensions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineScaleSetVMExtension](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, 
err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + vmExtensionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmExtensionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, vmExtensionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetVMExtensionsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/extensions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + vmExtensionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmExtensionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, vmExtensionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetVMExtensionsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/extensions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + vmExtensionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmExtensionName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armcompute.VirtualMachineScaleSetVMExtensionsClientGetOptions + if expandParam != nil { + options = &armcompute.VirtualMachineScaleSetVMExtensionsClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, vmExtensionNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineScaleSetVMExtension, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineScaleSetVMExtensionsServerTransport) dispatchList(req *http.Request) (*http.Response, error) { + if v.srv.List == nil { + return nil, &nonRetriableError{errors.New("fake for method List not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/extensions` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armcompute.VirtualMachineScaleSetVMExtensionsClientListOptions + if expandParam != nil { + options = &armcompute.VirtualMachineScaleSetVMExtensionsClientListOptions{ + Expand: expandParam, + } + } + respr, errRespr := v.srv.List(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineScaleSetVMExtensionsListResult, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineScaleSetVMExtensionsServerTransport) dispatchBeginUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdate not implemented")} + } + beginUpdate := v.beginUpdate.get(req) + if beginUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/extensions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineScaleSetVMExtensionUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + vmExtensionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmExtensionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginUpdate(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, vmExtensionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdate = &respr + v.beginUpdate.add(req, beginUpdate) + } + + resp, err := server.PollerResponderNext(beginUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.beginUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdate) { + v.beginUpdate.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachinescalesetvmruncommands_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachinescalesetvmruncommands_server.go new file mode 100644 index 00000000000..8997126ee4f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachinescalesetvmruncommands_server.go @@ -0,0 +1,376 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// VirtualMachineScaleSetVMRunCommandsServer is a fake server for instances of the armcompute.VirtualMachineScaleSetVMRunCommandsClient type. +type VirtualMachineScaleSetVMRunCommandsServer struct { + // BeginCreateOrUpdate is the fake for method VirtualMachineScaleSetVMRunCommandsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, runCommand armcompute.VirtualMachineRunCommand, options *armcompute.VirtualMachineScaleSetVMRunCommandsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VirtualMachineScaleSetVMRunCommandsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, options *armcompute.VirtualMachineScaleSetVMRunCommandsClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMRunCommandsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VirtualMachineScaleSetVMRunCommandsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, options *armcompute.VirtualMachineScaleSetVMRunCommandsClientGetOptions) (resp azfake.Responder[armcompute.VirtualMachineScaleSetVMRunCommandsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method VirtualMachineScaleSetVMRunCommandsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, vmScaleSetName string, instanceID string, options *armcompute.VirtualMachineScaleSetVMRunCommandsClientListOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetVMRunCommandsClientListResponse]) + + // BeginUpdate is the fake for method VirtualMachineScaleSetVMRunCommandsClient.BeginUpdate + // HTTP status codes to indicate success: http.StatusOK + BeginUpdate func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, runCommand armcompute.VirtualMachineRunCommandUpdate, options *armcompute.VirtualMachineScaleSetVMRunCommandsClientBeginUpdateOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMRunCommandsClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewVirtualMachineScaleSetVMRunCommandsServerTransport creates a new instance of VirtualMachineScaleSetVMRunCommandsServerTransport with the provided implementation. 
+// The returned VirtualMachineScaleSetVMRunCommandsServerTransport instance is connected to an instance of armcompute.VirtualMachineScaleSetVMRunCommandsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVirtualMachineScaleSetVMRunCommandsServerTransport(srv *VirtualMachineScaleSetVMRunCommandsServer) *VirtualMachineScaleSetVMRunCommandsServerTransport { + return &VirtualMachineScaleSetVMRunCommandsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMRunCommandsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armcompute.VirtualMachineScaleSetVMRunCommandsClientListResponse]](), + beginUpdate: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMRunCommandsClientUpdateResponse]](), + } +} + +// VirtualMachineScaleSetVMRunCommandsServerTransport connects instances of armcompute.VirtualMachineScaleSetVMRunCommandsClient to instances of VirtualMachineScaleSetVMRunCommandsServer. +// Don't use this type directly, use NewVirtualMachineScaleSetVMRunCommandsServerTransport instead. +type VirtualMachineScaleSetVMRunCommandsServerTransport struct { + srv *VirtualMachineScaleSetVMRunCommandsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMRunCommandsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armcompute.VirtualMachineScaleSetVMRunCommandsClientListResponse]] + beginUpdate *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMRunCommandsClientUpdateResponse]] +} + +// Do implements the policy.Transporter interface for VirtualMachineScaleSetVMRunCommandsServerTransport. 
+func (v *VirtualMachineScaleSetVMRunCommandsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualMachineScaleSetVMRunCommandsClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VirtualMachineScaleSetVMRunCommandsClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VirtualMachineScaleSetVMRunCommandsClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualMachineScaleSetVMRunCommandsClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + case "VirtualMachineScaleSetVMRunCommandsClient.BeginUpdate": + resp, err = v.dispatchBeginUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetVMRunCommandsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/runCommands/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineRunCommand](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + runCommandNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("runCommandName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, runCommandNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetVMRunCommandsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/runCommands/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + runCommandNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("runCommandName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, runCommandNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetVMRunCommandsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/runCommands/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + runCommandNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("runCommandName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armcompute.VirtualMachineScaleSetVMRunCommandsClientGetOptions + if expandParam != nil { + options = &armcompute.VirtualMachineScaleSetVMRunCommandsClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, runCommandNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineRunCommand, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineScaleSetVMRunCommandsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := v.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/runCommands` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armcompute.VirtualMachineScaleSetVMRunCommandsClientListOptions + if expandParam != nil { + options = &armcompute.VirtualMachineScaleSetVMRunCommandsClientListOptions{ + Expand: expandParam, + } + } + resp := v.srv.NewListPager(resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, options) + newListPager = &resp + v.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armcompute.VirtualMachineScaleSetVMRunCommandsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + v.newListPager.remove(req) + } + return resp, nil +} + +func (v *VirtualMachineScaleSetVMRunCommandsServerTransport) dispatchBeginUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdate not implemented")} + } + beginUpdate := v.beginUpdate.get(req) + if beginUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/runCommands/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineRunCommandUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + runCommandNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("runCommandName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginUpdate(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, runCommandNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdate = &respr + v.beginUpdate.add(req, beginUpdate) + } + + resp, err := server.PollerResponderNext(beginUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.beginUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdate) { + v.beginUpdate.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachinescalesetvms_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachinescalesetvms_server.go new file mode 100644 index 00000000000..cdbcf8152e2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachinescalesetvms_server.go @@ -0,0 +1,1133 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "reflect" + "regexp" + "strconv" +) + +// VirtualMachineScaleSetVMsServer is a fake server for instances of the armcompute.VirtualMachineScaleSetVMsClient type. +type VirtualMachineScaleSetVMsServer struct { + // BeginApproveRollingUpgrade is the fake for method VirtualMachineScaleSetVMsClient.BeginApproveRollingUpgrade + // HTTP status codes to indicate success: http.StatusAccepted + BeginApproveRollingUpgrade func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *armcompute.VirtualMachineScaleSetVMsClientBeginApproveRollingUpgradeOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientApproveRollingUpgradeResponse], errResp azfake.ErrorResponder) + + // BeginAttachDetachDataDisks is the fake for method VirtualMachineScaleSetVMsClient.BeginAttachDetachDataDisks + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginAttachDetachDataDisks func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, parameters armcompute.AttachDetachDataDisksRequest, options *armcompute.VirtualMachineScaleSetVMsClientBeginAttachDetachDataDisksOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientAttachDetachDataDisksResponse], errResp azfake.ErrorResponder) + + // BeginDeallocate is the fake for method VirtualMachineScaleSetVMsClient.BeginDeallocate + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginDeallocate func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *armcompute.VirtualMachineScaleSetVMsClientBeginDeallocateOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientDeallocateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VirtualMachineScaleSetVMsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *armcompute.VirtualMachineScaleSetVMsClientBeginDeleteOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VirtualMachineScaleSetVMsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *armcompute.VirtualMachineScaleSetVMsClientGetOptions) (resp azfake.Responder[armcompute.VirtualMachineScaleSetVMsClientGetResponse], errResp azfake.ErrorResponder) + + // GetInstanceView is the fake for method VirtualMachineScaleSetVMsClient.GetInstanceView + // HTTP status codes to indicate success: http.StatusOK + GetInstanceView func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *armcompute.VirtualMachineScaleSetVMsClientGetInstanceViewOptions) (resp azfake.Responder[armcompute.VirtualMachineScaleSetVMsClientGetInstanceViewResponse], errResp azfake.ErrorResponder) + + // 
NewListPager is the fake for method VirtualMachineScaleSetVMsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, virtualMachineScaleSetName string, options *armcompute.VirtualMachineScaleSetVMsClientListOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetVMsClientListResponse]) + + // BeginPerformMaintenance is the fake for method VirtualMachineScaleSetVMsClient.BeginPerformMaintenance + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginPerformMaintenance func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *armcompute.VirtualMachineScaleSetVMsClientBeginPerformMaintenanceOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientPerformMaintenanceResponse], errResp azfake.ErrorResponder) + + // BeginPowerOff is the fake for method VirtualMachineScaleSetVMsClient.BeginPowerOff + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginPowerOff func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *armcompute.VirtualMachineScaleSetVMsClientBeginPowerOffOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientPowerOffResponse], errResp azfake.ErrorResponder) + + // BeginRedeploy is the fake for method VirtualMachineScaleSetVMsClient.BeginRedeploy + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginRedeploy func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *armcompute.VirtualMachineScaleSetVMsClientBeginRedeployOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientRedeployResponse], errResp azfake.ErrorResponder) + + // BeginReimage is the fake for method VirtualMachineScaleSetVMsClient.BeginReimage + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginReimage func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *armcompute.VirtualMachineScaleSetVMsClientBeginReimageOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientReimageResponse], errResp azfake.ErrorResponder) + + // BeginReimageAll is the fake for method VirtualMachineScaleSetVMsClient.BeginReimageAll + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginReimageAll func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *armcompute.VirtualMachineScaleSetVMsClientBeginReimageAllOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientReimageAllResponse], errResp azfake.ErrorResponder) + + // BeginRestart is the fake for method VirtualMachineScaleSetVMsClient.BeginRestart + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginRestart func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *armcompute.VirtualMachineScaleSetVMsClientBeginRestartOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientRestartResponse], errResp azfake.ErrorResponder) + + // RetrieveBootDiagnosticsData is the fake for method VirtualMachineScaleSetVMsClient.RetrieveBootDiagnosticsData + // HTTP status codes to indicate success: http.StatusOK + RetrieveBootDiagnosticsData func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID 
string, options *armcompute.VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataOptions) (resp azfake.Responder[armcompute.VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataResponse], errResp azfake.ErrorResponder) + + // BeginRunCommand is the fake for method VirtualMachineScaleSetVMsClient.BeginRunCommand + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginRunCommand func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, parameters armcompute.RunCommandInput, options *armcompute.VirtualMachineScaleSetVMsClientBeginRunCommandOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientRunCommandResponse], errResp azfake.ErrorResponder) + + // SimulateEviction is the fake for method VirtualMachineScaleSetVMsClient.SimulateEviction + // HTTP status codes to indicate success: http.StatusNoContent + SimulateEviction func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *armcompute.VirtualMachineScaleSetVMsClientSimulateEvictionOptions) (resp azfake.Responder[armcompute.VirtualMachineScaleSetVMsClientSimulateEvictionResponse], errResp azfake.ErrorResponder) + + // BeginStart is the fake for method VirtualMachineScaleSetVMsClient.BeginStart + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginStart func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *armcompute.VirtualMachineScaleSetVMsClientBeginStartOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientStartResponse], errResp azfake.ErrorResponder) + + // BeginUpdate is the fake for method VirtualMachineScaleSetVMsClient.BeginUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginUpdate func(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, parameters armcompute.VirtualMachineScaleSetVM, options *armcompute.VirtualMachineScaleSetVMsClientBeginUpdateOptions) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewVirtualMachineScaleSetVMsServerTransport creates a new instance of VirtualMachineScaleSetVMsServerTransport with the provided implementation. +// The returned VirtualMachineScaleSetVMsServerTransport instance is connected to an instance of armcompute.VirtualMachineScaleSetVMsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
+func NewVirtualMachineScaleSetVMsServerTransport(srv *VirtualMachineScaleSetVMsServer) *VirtualMachineScaleSetVMsServerTransport { + return &VirtualMachineScaleSetVMsServerTransport{ + srv: srv, + beginApproveRollingUpgrade: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientApproveRollingUpgradeResponse]](), + beginAttachDetachDataDisks: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientAttachDetachDataDisksResponse]](), + beginDeallocate: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientDeallocateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armcompute.VirtualMachineScaleSetVMsClientListResponse]](), + beginPerformMaintenance: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientPerformMaintenanceResponse]](), + beginPowerOff: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientPowerOffResponse]](), + beginRedeploy: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientRedeployResponse]](), + beginReimage: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientReimageResponse]](), + beginReimageAll: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientReimageAllResponse]](), + beginRestart: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientRestartResponse]](), + beginRunCommand: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientRunCommandResponse]](), + beginStart: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientStartResponse]](), + beginUpdate: newTracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientUpdateResponse]](), + } +} + +// VirtualMachineScaleSetVMsServerTransport connects instances of armcompute.VirtualMachineScaleSetVMsClient to instances of VirtualMachineScaleSetVMsServer. +// Don't use this type directly, use NewVirtualMachineScaleSetVMsServerTransport instead. 
+type VirtualMachineScaleSetVMsServerTransport struct { + srv *VirtualMachineScaleSetVMsServer + beginApproveRollingUpgrade *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientApproveRollingUpgradeResponse]] + beginAttachDetachDataDisks *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientAttachDetachDataDisksResponse]] + beginDeallocate *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientDeallocateResponse]] + beginDelete *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armcompute.VirtualMachineScaleSetVMsClientListResponse]] + beginPerformMaintenance *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientPerformMaintenanceResponse]] + beginPowerOff *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientPowerOffResponse]] + beginRedeploy *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientRedeployResponse]] + beginReimage *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientReimageResponse]] + beginReimageAll *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientReimageAllResponse]] + beginRestart *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientRestartResponse]] + beginRunCommand *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientRunCommandResponse]] + beginStart *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientStartResponse]] + beginUpdate *tracker[azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientUpdateResponse]] +} + +// Do implements the policy.Transporter interface for VirtualMachineScaleSetVMsServerTransport. 
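The Begin* fields above return azfake.PollerResponder values, and each dispatcher below checks the documented success codes before handing the polled response back to the caller. A hedged sketch of a long-running-operation fake, assuming the azfake PollerResponder API (AddNonTerminalResponse, SetTerminalResponse) and reusing the wiring pattern from the earlier sketch; names are placeholders.

package fake_test

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake"
)

func ExampleNewVirtualMachineScaleSetVMsServerTransport() {
	srv := fake.VirtualMachineScaleSetVMsServer{
		BeginStart: func(ctx context.Context, resourceGroupName, vmScaleSetName, instanceID string,
			options *armcompute.VirtualMachineScaleSetVMsClientBeginStartOptions,
		) (resp azfake.PollerResponder[armcompute.VirtualMachineScaleSetVMsClientStartResponse], errResp azfake.ErrorResponder) {
			// One in-progress poll followed by a terminal 200; both codes are
			// within the set the BeginStart dispatcher treats as success.
			resp.AddNonTerminalResponse(http.StatusOK, nil)
			resp.SetTerminalResponse(http.StatusOK, armcompute.VirtualMachineScaleSetVMsClientStartResponse{}, nil)
			return
		},
	}

	client, err := armcompute.NewVirtualMachineScaleSetVMsClient("subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{
			Transport: fake.NewVirtualMachineScaleSetVMsServerTransport(&srv),
		},
	})
	if err != nil {
		panic(err)
	}
	poller, err := client.BeginStart(context.Background(), "rg", "vmss", "0", nil)
	if err != nil {
		panic(err)
	}
	// The fake transport replays the non-terminal and terminal responses set above.
	_, _ = poller.PollUntilDone(context.Background(), nil)
}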
+func (v *VirtualMachineScaleSetVMsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualMachineScaleSetVMsClient.BeginApproveRollingUpgrade": + resp, err = v.dispatchBeginApproveRollingUpgrade(req) + case "VirtualMachineScaleSetVMsClient.BeginAttachDetachDataDisks": + resp, err = v.dispatchBeginAttachDetachDataDisks(req) + case "VirtualMachineScaleSetVMsClient.BeginDeallocate": + resp, err = v.dispatchBeginDeallocate(req) + case "VirtualMachineScaleSetVMsClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VirtualMachineScaleSetVMsClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualMachineScaleSetVMsClient.GetInstanceView": + resp, err = v.dispatchGetInstanceView(req) + case "VirtualMachineScaleSetVMsClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + case "VirtualMachineScaleSetVMsClient.BeginPerformMaintenance": + resp, err = v.dispatchBeginPerformMaintenance(req) + case "VirtualMachineScaleSetVMsClient.BeginPowerOff": + resp, err = v.dispatchBeginPowerOff(req) + case "VirtualMachineScaleSetVMsClient.BeginRedeploy": + resp, err = v.dispatchBeginRedeploy(req) + case "VirtualMachineScaleSetVMsClient.BeginReimage": + resp, err = v.dispatchBeginReimage(req) + case "VirtualMachineScaleSetVMsClient.BeginReimageAll": + resp, err = v.dispatchBeginReimageAll(req) + case "VirtualMachineScaleSetVMsClient.BeginRestart": + resp, err = v.dispatchBeginRestart(req) + case "VirtualMachineScaleSetVMsClient.RetrieveBootDiagnosticsData": + resp, err = v.dispatchRetrieveBootDiagnosticsData(req) + case "VirtualMachineScaleSetVMsClient.BeginRunCommand": + resp, err = v.dispatchBeginRunCommand(req) + case "VirtualMachineScaleSetVMsClient.SimulateEviction": + resp, err = v.dispatchSimulateEviction(req) + case "VirtualMachineScaleSetVMsClient.BeginStart": + resp, err = v.dispatchBeginStart(req) + case "VirtualMachineScaleSetVMsClient.BeginUpdate": + resp, err = v.dispatchBeginUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetVMsServerTransport) dispatchBeginApproveRollingUpgrade(req *http.Request) (*http.Response, error) { + if v.srv.BeginApproveRollingUpgrade == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginApproveRollingUpgrade not implemented")} + } + beginApproveRollingUpgrade := v.beginApproveRollingUpgrade.get(req) + if beginApproveRollingUpgrade == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/approveRollingUpgrade` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + 
instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginApproveRollingUpgrade(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginApproveRollingUpgrade = &respr + v.beginApproveRollingUpgrade.add(req, beginApproveRollingUpgrade) + } + + resp, err := server.PollerResponderNext(beginApproveRollingUpgrade, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusAccepted}, resp.StatusCode) { + v.beginApproveRollingUpgrade.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginApproveRollingUpgrade) { + v.beginApproveRollingUpgrade.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetVMsServerTransport) dispatchBeginAttachDetachDataDisks(req *http.Request) (*http.Response, error) { + if v.srv.BeginAttachDetachDataDisks == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginAttachDetachDataDisks not implemented")} + } + beginAttachDetachDataDisks := v.beginAttachDetachDataDisks.get(req) + if beginAttachDetachDataDisks == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualmachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/attachDetachDataDisks` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.AttachDetachDataDisksRequest](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginAttachDetachDataDisks(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginAttachDetachDataDisks = &respr + v.beginAttachDetachDataDisks.add(req, beginAttachDetachDataDisks) + } + + resp, err := server.PollerResponderNext(beginAttachDetachDataDisks, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginAttachDetachDataDisks.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginAttachDetachDataDisks) { + v.beginAttachDetachDataDisks.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetVMsServerTransport) dispatchBeginDeallocate(req *http.Request) (*http.Response, error) { + if v.srv.BeginDeallocate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDeallocate not implemented")} + } + beginDeallocate := v.beginDeallocate.get(req) + if beginDeallocate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/deallocate` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDeallocate(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDeallocate = &respr + v.beginDeallocate.add(req, beginDeallocate) + } + + resp, err := server.PollerResponderNext(beginDeallocate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginDeallocate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDeallocate) { + v.beginDeallocate.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetVMsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + forceDeletionUnescaped, err := url.QueryUnescape(qp.Get("forceDeletion")) + if err != nil { + return nil, err + } + forceDeletionParam, err := parseOptional(forceDeletionUnescaped, strconv.ParseBool) + if err != nil { + return nil, err + } + var options *armcompute.VirtualMachineScaleSetVMsClientBeginDeleteOptions + if forceDeletionParam != nil { + options = &armcompute.VirtualMachineScaleSetVMsClientBeginDeleteOptions{ + ForceDeletion: forceDeletionParam, + } + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetVMsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(armcompute.InstanceViewTypes(expandUnescaped)) + var options *armcompute.VirtualMachineScaleSetVMsClientGetOptions + if expandParam != nil { + options = &armcompute.VirtualMachineScaleSetVMsClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineScaleSetVM, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineScaleSetVMsServerTransport) dispatchGetInstanceView(req *http.Request) (*http.Response, error) { + if v.srv.GetInstanceView == nil { + return nil, &nonRetriableError{errors.New("fake for method GetInstanceView not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/instanceView` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.GetInstanceView(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualMachineScaleSetVMInstanceView, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineScaleSetVMsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := v.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualMachineScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualMachineScaleSetName")]) + if err != nil { + return nil, err + } + filterUnescaped, err := url.QueryUnescape(qp.Get("$filter")) + if err != nil { + return nil, err + } + filterParam := getOptional(filterUnescaped) + selectUnescaped, err := url.QueryUnescape(qp.Get("$select")) + if err != nil { + return nil, err + } + selectParam := getOptional(selectUnescaped) + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armcompute.VirtualMachineScaleSetVMsClientListOptions + if filterParam != nil || selectParam != nil || expandParam != nil { + options = &armcompute.VirtualMachineScaleSetVMsClientListOptions{ + Filter: filterParam, + Select: selectParam, + Expand: expandParam, + } + } + resp := v.srv.NewListPager(resourceGroupNameParam, virtualMachineScaleSetNameParam, options) + newListPager = &resp + v.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armcompute.VirtualMachineScaleSetVMsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + v.newListPager.remove(req) + } + return resp, nil +} + +func (v *VirtualMachineScaleSetVMsServerTransport) dispatchBeginPerformMaintenance(req *http.Request) (*http.Response, error) { + if v.srv.BeginPerformMaintenance == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginPerformMaintenance not implemented")} + } + beginPerformMaintenance := v.beginPerformMaintenance.get(req) + if beginPerformMaintenance == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualmachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/performMaintenance` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginPerformMaintenance(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginPerformMaintenance = &respr + v.beginPerformMaintenance.add(req, beginPerformMaintenance) + } + + resp, err := server.PollerResponderNext(beginPerformMaintenance, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginPerformMaintenance.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginPerformMaintenance) { + v.beginPerformMaintenance.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetVMsServerTransport) dispatchBeginPowerOff(req *http.Request) (*http.Response, error) { + if v.srv.BeginPowerOff == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginPowerOff not implemented")} + } + beginPowerOff := v.beginPowerOff.get(req) + if beginPowerOff == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualmachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/poweroff` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + skipShutdownUnescaped, err := url.QueryUnescape(qp.Get("skipShutdown")) + if err != nil { + return nil, err + } + skipShutdownParam, err := parseOptional(skipShutdownUnescaped, strconv.ParseBool) + if err != nil { + return nil, err + } + var options *armcompute.VirtualMachineScaleSetVMsClientBeginPowerOffOptions + if skipShutdownParam != nil { + options = &armcompute.VirtualMachineScaleSetVMsClientBeginPowerOffOptions{ + SkipShutdown: skipShutdownParam, + } + } + respr, errRespr := v.srv.BeginPowerOff(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginPowerOff = &respr + v.beginPowerOff.add(req, beginPowerOff) + } + + resp, err := server.PollerResponderNext(beginPowerOff, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginPowerOff.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginPowerOff) { + v.beginPowerOff.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetVMsServerTransport) dispatchBeginRedeploy(req *http.Request) (*http.Response, error) { + if v.srv.BeginRedeploy == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginRedeploy not implemented")} + } + beginRedeploy := v.beginRedeploy.get(req) + if beginRedeploy == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualmachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/redeploy` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginRedeploy(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginRedeploy = &respr + v.beginRedeploy.add(req, beginRedeploy) + } + + resp, err := server.PollerResponderNext(beginRedeploy, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginRedeploy.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginRedeploy) { + v.beginRedeploy.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetVMsServerTransport) dispatchBeginReimage(req *http.Request) (*http.Response, error) { + if v.srv.BeginReimage == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginReimage not implemented")} + } + beginReimage := v.beginReimage.get(req) + if beginReimage == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/reimage` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineScaleSetVMReimageParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + var options *armcompute.VirtualMachineScaleSetVMsClientBeginReimageOptions + if !reflect.ValueOf(body).IsZero() { + options = &armcompute.VirtualMachineScaleSetVMsClientBeginReimageOptions{ + VMScaleSetVMReimageInput: &body, + } + } + respr, errRespr := v.srv.BeginReimage(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginReimage = &respr + v.beginReimage.add(req, beginReimage) + } + + resp, err := server.PollerResponderNext(beginReimage, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginReimage.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginReimage) { + v.beginReimage.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetVMsServerTransport) dispatchBeginReimageAll(req *http.Request) (*http.Response, error) { + if v.srv.BeginReimageAll == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginReimageAll not implemented")} + } + beginReimageAll := v.beginReimageAll.get(req) + if beginReimageAll == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/reimageall` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginReimageAll(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginReimageAll = &respr + v.beginReimageAll.add(req, beginReimageAll) + } + + resp, err := server.PollerResponderNext(beginReimageAll, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginReimageAll.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginReimageAll) { + v.beginReimageAll.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetVMsServerTransport) dispatchBeginRestart(req *http.Request) (*http.Response, error) { + if v.srv.BeginRestart == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginRestart not implemented")} + } + beginRestart := v.beginRestart.get(req) + if beginRestart == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualmachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/restart` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginRestart(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginRestart = &respr + v.beginRestart.add(req, beginRestart) + } + + resp, err := server.PollerResponderNext(beginRestart, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginRestart.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginRestart) { + v.beginRestart.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetVMsServerTransport) dispatchRetrieveBootDiagnosticsData(req *http.Request) (*http.Response, error) { + if v.srv.RetrieveBootDiagnosticsData == nil { + return nil, &nonRetriableError{errors.New("fake for method RetrieveBootDiagnosticsData not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualmachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/retrieveBootDiagnosticsData` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + sasURIExpirationTimeInMinutesUnescaped, err := url.QueryUnescape(qp.Get("sasUriExpirationTimeInMinutes")) + if err != nil { + return nil, err + } + sasURIExpirationTimeInMinutesParam, err := parseOptional(sasURIExpirationTimeInMinutesUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + var options *armcompute.VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataOptions + if sasURIExpirationTimeInMinutesParam != nil { + options = &armcompute.VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataOptions{ + SasURIExpirationTimeInMinutes: sasURIExpirationTimeInMinutesParam, + } + } + respr, errRespr := v.srv.RetrieveBootDiagnosticsData(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).RetrieveBootDiagnosticsDataResult, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineScaleSetVMsServerTransport) dispatchBeginRunCommand(req *http.Request) (*http.Response, error) { + if v.srv.BeginRunCommand == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginRunCommand not implemented")} + } + beginRunCommand := v.beginRunCommand.get(req) + if beginRunCommand == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualmachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/runCommand` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.RunCommandInput](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginRunCommand(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginRunCommand = &respr + v.beginRunCommand.add(req, beginRunCommand) + } + + resp, err := server.PollerResponderNext(beginRunCommand, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginRunCommand.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginRunCommand) { + v.beginRunCommand.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetVMsServerTransport) dispatchSimulateEviction(req *http.Request) (*http.Response, error) { + if v.srv.SimulateEviction == nil { + return nil, &nonRetriableError{errors.New("fake for method SimulateEviction not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/simulateEviction` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.SimulateEviction(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusNoContent}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusNoContent", respContent.HTTPStatus)} + } + resp, err := server.NewResponse(respContent, req, nil) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualMachineScaleSetVMsServerTransport) dispatchBeginStart(req *http.Request) (*http.Response, error) { + if v.srv.BeginStart == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginStart not implemented")} + } + beginStart := v.beginStart.get(req) + if beginStart == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualmachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/start` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginStart(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginStart = &respr + v.beginStart.add(req, beginStart) + } + + resp, err := server.PollerResponderNext(beginStart, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, 
resp.StatusCode) { + v.beginStart.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginStart) { + v.beginStart.remove(req) + } + + return resp, nil +} + +func (v *VirtualMachineScaleSetVMsServerTransport) dispatchBeginUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdate not implemented")} + } + beginUpdate := v.beginUpdate.get(req) + if beginUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armcompute.VirtualMachineScaleSetVM](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vmScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vmScaleSetName")]) + if err != nil { + return nil, err + } + instanceIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("instanceId")]) + if err != nil { + return nil, err + } + ifMatchParam := getOptional(getHeaderValue(req.Header, "If-Match")) + ifNoneMatchParam := getOptional(getHeaderValue(req.Header, "If-None-Match")) + var options *armcompute.VirtualMachineScaleSetVMsClientBeginUpdateOptions + if ifMatchParam != nil || ifNoneMatchParam != nil { + options = &armcompute.VirtualMachineScaleSetVMsClientBeginUpdateOptions{ + IfMatch: ifMatchParam, + IfNoneMatch: ifNoneMatchParam, + } + } + respr, errRespr := v.srv.BeginUpdate(req.Context(), resourceGroupNameParam, vmScaleSetNameParam, instanceIDParam, body, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdate = &respr + v.beginUpdate.add(req, beginUpdate) + } + + resp, err := server.PollerResponderNext(beginUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdate) { + v.beginUpdate.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachinesizes_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachinesizes_server.go new file mode 100644 index 00000000000..1e1051ed339 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake/virtualmachinesizes_server.go @@ -0,0 +1,104 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. 
+// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "net/http" + "net/url" + "regexp" +) + +// VirtualMachineSizesServer is a fake server for instances of the armcompute.VirtualMachineSizesClient type. +type VirtualMachineSizesServer struct { + // NewListPager is the fake for method VirtualMachineSizesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(location string, options *armcompute.VirtualMachineSizesClientListOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineSizesClientListResponse]) +} + +// NewVirtualMachineSizesServerTransport creates a new instance of VirtualMachineSizesServerTransport with the provided implementation. +// The returned VirtualMachineSizesServerTransport instance is connected to an instance of armcompute.VirtualMachineSizesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVirtualMachineSizesServerTransport(srv *VirtualMachineSizesServer) *VirtualMachineSizesServerTransport { + return &VirtualMachineSizesServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armcompute.VirtualMachineSizesClientListResponse]](), + } +} + +// VirtualMachineSizesServerTransport connects instances of armcompute.VirtualMachineSizesClient to instances of VirtualMachineSizesServer. +// Don't use this type directly, use NewVirtualMachineSizesServerTransport instead. +type VirtualMachineSizesServerTransport struct { + srv *VirtualMachineSizesServer + newListPager *tracker[azfake.PagerResponder[armcompute.VirtualMachineSizesClientListResponse]] +} + +// Do implements the policy.Transporter interface for VirtualMachineSizesServerTransport. 
+func (v *VirtualMachineSizesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualMachineSizesClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualMachineSizesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := v.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/vmSizes` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListPager(locationParam, nil) + newListPager = &resp + v.newListPager.add(req, newListPager) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + v.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/adminrulecollections_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/adminrulecollections_server.go new file mode 100644 index 00000000000..b7d591e2906 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/adminrulecollections_server.go @@ -0,0 +1,318 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" + "strconv" +) + +// AdminRuleCollectionsServer is a fake server for instances of the armnetwork.AdminRuleCollectionsClient type. 
+type AdminRuleCollectionsServer struct { + // CreateOrUpdate is the fake for method AdminRuleCollectionsClient.CreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + CreateOrUpdate func(ctx context.Context, resourceGroupName string, networkManagerName string, configurationName string, ruleCollectionName string, ruleCollection armnetwork.AdminRuleCollection, options *armnetwork.AdminRuleCollectionsClientCreateOrUpdateOptions) (resp azfake.Responder[armnetwork.AdminRuleCollectionsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method AdminRuleCollectionsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, networkManagerName string, configurationName string, ruleCollectionName string, options *armnetwork.AdminRuleCollectionsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.AdminRuleCollectionsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method AdminRuleCollectionsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, networkManagerName string, configurationName string, ruleCollectionName string, options *armnetwork.AdminRuleCollectionsClientGetOptions) (resp azfake.Responder[armnetwork.AdminRuleCollectionsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method AdminRuleCollectionsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, networkManagerName string, configurationName string, options *armnetwork.AdminRuleCollectionsClientListOptions) (resp azfake.PagerResponder[armnetwork.AdminRuleCollectionsClientListResponse]) +} + +// NewAdminRuleCollectionsServerTransport creates a new instance of AdminRuleCollectionsServerTransport with the provided implementation. +// The returned AdminRuleCollectionsServerTransport instance is connected to an instance of armnetwork.AdminRuleCollectionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewAdminRuleCollectionsServerTransport(srv *AdminRuleCollectionsServer) *AdminRuleCollectionsServerTransport { + return &AdminRuleCollectionsServerTransport{ + srv: srv, + beginDelete: newTracker[azfake.PollerResponder[armnetwork.AdminRuleCollectionsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.AdminRuleCollectionsClientListResponse]](), + } +} + +// AdminRuleCollectionsServerTransport connects instances of armnetwork.AdminRuleCollectionsClient to instances of AdminRuleCollectionsServer. +// Don't use this type directly, use NewAdminRuleCollectionsServerTransport instead. +type AdminRuleCollectionsServerTransport struct { + srv *AdminRuleCollectionsServer + beginDelete *tracker[azfake.PollerResponder[armnetwork.AdminRuleCollectionsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.AdminRuleCollectionsClientListResponse]] +} + +// Do implements the policy.Transporter interface for AdminRuleCollectionsServerTransport. 
+func (a *AdminRuleCollectionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "AdminRuleCollectionsClient.CreateOrUpdate": + resp, err = a.dispatchCreateOrUpdate(req) + case "AdminRuleCollectionsClient.BeginDelete": + resp, err = a.dispatchBeginDelete(req) + case "AdminRuleCollectionsClient.Get": + resp, err = a.dispatchGet(req) + case "AdminRuleCollectionsClient.NewListPager": + resp, err = a.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (a *AdminRuleCollectionsServerTransport) dispatchCreateOrUpdate(req *http.Request) (*http.Response, error) { + if a.srv.CreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method CreateOrUpdate not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/securityAdminConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ruleCollections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.AdminRuleCollection](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + configurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("configurationName")]) + if err != nil { + return nil, err + } + ruleCollectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ruleCollectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.CreateOrUpdate(req.Context(), resourceGroupNameParam, networkManagerNameParam, configurationNameParam, ruleCollectionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusCreated}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).AdminRuleCollection, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (a *AdminRuleCollectionsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if a.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := a.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/securityAdminConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ruleCollections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + configurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("configurationName")]) + if err != nil { + return nil, err + } + ruleCollectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ruleCollectionName")]) + if err != nil { + return nil, err + } + forceUnescaped, err := url.QueryUnescape(qp.Get("force")) + if err != nil { + return nil, err + } + forceParam, err := parseOptional(forceUnescaped, strconv.ParseBool) + if err != nil { + return nil, err + } + var options *armnetwork.AdminRuleCollectionsClientBeginDeleteOptions + if forceParam != nil { + options = &armnetwork.AdminRuleCollectionsClientBeginDeleteOptions{ + Force: forceParam, + } + } + respr, errRespr := a.srv.BeginDelete(req.Context(), resourceGroupNameParam, networkManagerNameParam, configurationNameParam, ruleCollectionNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + a.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + a.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)}
+	}
+	if !server.PollerResponderMore(beginDelete) {
+		a.beginDelete.remove(req)
+	}
+
+	return resp, nil
+}
+
+func (a *AdminRuleCollectionsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) {
+	if a.srv.Get == nil {
+		return nil, &nonRetriableError{errors.New("fake for method Get not implemented")}
+	}
+	const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P<networkManagerName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/securityAdminConfigurations/(?P<configurationName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ruleCollections/(?P<ruleCollectionName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)`
+	regex := regexp.MustCompile(regexStr)
+	matches := regex.FindStringSubmatch(req.URL.EscapedPath())
+	if matches == nil || len(matches) < 5 {
+		return nil, fmt.Errorf("failed to parse path %s", req.URL.Path)
+	}
+	resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")])
+	if err != nil {
+		return nil, err
+	}
+	networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")])
+	if err != nil {
+		return nil, err
+	}
+	configurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("configurationName")])
+	if err != nil {
+		return nil, err
+	}
+	ruleCollectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ruleCollectionName")])
+	if err != nil {
+		return nil, err
+	}
+	respr, errRespr := a.srv.Get(req.Context(), resourceGroupNameParam, networkManagerNameParam, configurationNameParam, ruleCollectionNameParam, nil)
+	if respErr := server.GetError(errRespr, req); respErr != nil {
+		return nil, respErr
+	}
+	respContent := server.GetResponseContent(respr)
+	if !contains([]int{http.StatusOK}, respContent.HTTPStatus) {
+		return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).AdminRuleCollection, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (a *AdminRuleCollectionsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if a.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := a.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/securityAdminConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ruleCollections` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + configurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("configurationName")]) + if err != nil { + return nil, err + } + topUnescaped, err := url.QueryUnescape(qp.Get("$top")) + if err != nil { + return nil, err + } + topParam, err := parseOptional(topUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + skipTokenUnescaped, err := url.QueryUnescape(qp.Get("$skipToken")) + if err != nil { + return nil, err + } + skipTokenParam := getOptional(skipTokenUnescaped) + var options *armnetwork.AdminRuleCollectionsClientListOptions + if topParam != nil || skipTokenParam != nil { + options = &armnetwork.AdminRuleCollectionsClientListOptions{ + Top: topParam, + SkipToken: skipTokenParam, + } + } + resp := a.srv.NewListPager(resourceGroupNameParam, networkManagerNameParam, configurationNameParam, options) + newListPager = &resp + a.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.AdminRuleCollectionsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + a.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + a.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/adminrules_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/adminrules_server.go new file mode 100644 index 00000000000..8863aa27b19 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/adminrules_server.go @@ -0,0 +1,338 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" + "strconv" +) + +// AdminRulesServer is a fake server for instances of the armnetwork.AdminRulesClient type. +type AdminRulesServer struct { + // CreateOrUpdate is the fake for method AdminRulesClient.CreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + CreateOrUpdate func(ctx context.Context, resourceGroupName string, networkManagerName string, configurationName string, ruleCollectionName string, ruleName string, adminRule armnetwork.BaseAdminRuleClassification, options *armnetwork.AdminRulesClientCreateOrUpdateOptions) (resp azfake.Responder[armnetwork.AdminRulesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method AdminRulesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, networkManagerName string, configurationName string, ruleCollectionName string, ruleName string, options *armnetwork.AdminRulesClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.AdminRulesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method AdminRulesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, networkManagerName string, configurationName string, ruleCollectionName string, ruleName string, options *armnetwork.AdminRulesClientGetOptions) (resp azfake.Responder[armnetwork.AdminRulesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method AdminRulesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, networkManagerName string, configurationName string, ruleCollectionName string, options *armnetwork.AdminRulesClientListOptions) (resp azfake.PagerResponder[armnetwork.AdminRulesClientListResponse]) +} + +// NewAdminRulesServerTransport creates a new instance of AdminRulesServerTransport with the provided implementation. +// The returned AdminRulesServerTransport instance is connected to an instance of armnetwork.AdminRulesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewAdminRulesServerTransport(srv *AdminRulesServer) *AdminRulesServerTransport { + return &AdminRulesServerTransport{ + srv: srv, + beginDelete: newTracker[azfake.PollerResponder[armnetwork.AdminRulesClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.AdminRulesClientListResponse]](), + } +} + +// AdminRulesServerTransport connects instances of armnetwork.AdminRulesClient to instances of AdminRulesServer. +// Don't use this type directly, use NewAdminRulesServerTransport instead. 
+type AdminRulesServerTransport struct { + srv *AdminRulesServer + beginDelete *tracker[azfake.PollerResponder[armnetwork.AdminRulesClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.AdminRulesClientListResponse]] +} + +// Do implements the policy.Transporter interface for AdminRulesServerTransport. +func (a *AdminRulesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "AdminRulesClient.CreateOrUpdate": + resp, err = a.dispatchCreateOrUpdate(req) + case "AdminRulesClient.BeginDelete": + resp, err = a.dispatchBeginDelete(req) + case "AdminRulesClient.Get": + resp, err = a.dispatchGet(req) + case "AdminRulesClient.NewListPager": + resp, err = a.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (a *AdminRulesServerTransport) dispatchCreateOrUpdate(req *http.Request) (*http.Response, error) { + if a.srv.CreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method CreateOrUpdate not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/securityAdminConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ruleCollections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/rules/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 6 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + raw, err := readRequestBody(req) + if err != nil { + return nil, err + } + body, err := unmarshalBaseAdminRuleClassification(raw) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + configurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("configurationName")]) + if err != nil { + return nil, err + } + ruleCollectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ruleCollectionName")]) + if err != nil { + return nil, err + } + ruleNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ruleName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.CreateOrUpdate(req.Context(), resourceGroupNameParam, networkManagerNameParam, configurationNameParam, ruleCollectionNameParam, ruleNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusCreated}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).BaseAdminRuleClassification, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (a *AdminRulesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if a.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := a.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/securityAdminConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ruleCollections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/rules/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 6 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + configurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("configurationName")]) + if err != nil { + return nil, err + } + ruleCollectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ruleCollectionName")]) + if err != nil { + return nil, err + } + ruleNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ruleName")]) + if err != nil { + return nil, err + } + forceUnescaped, err := url.QueryUnescape(qp.Get("force")) + if err != nil { + return nil, err + } + forceParam, err := parseOptional(forceUnescaped, strconv.ParseBool) + if err != nil { + return nil, err + } + var options *armnetwork.AdminRulesClientBeginDeleteOptions + if forceParam != nil { + options = &armnetwork.AdminRulesClientBeginDeleteOptions{ + Force: forceParam, + } + } + respr, errRespr := a.srv.BeginDelete(req.Context(), resourceGroupNameParam, networkManagerNameParam, configurationNameParam, ruleCollectionNameParam, ruleNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + a.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + a.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)}
+	}
+	if !server.PollerResponderMore(beginDelete) {
+		a.beginDelete.remove(req)
+	}
+
+	return resp, nil
+}
+
+func (a *AdminRulesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) {
+	if a.srv.Get == nil {
+		return nil, &nonRetriableError{errors.New("fake for method Get not implemented")}
+	}
+	const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P<networkManagerName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/securityAdminConfigurations/(?P<configurationName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ruleCollections/(?P<ruleCollectionName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/rules/(?P<ruleName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)`
+	regex := regexp.MustCompile(regexStr)
+	matches := regex.FindStringSubmatch(req.URL.EscapedPath())
+	if matches == nil || len(matches) < 6 {
+		return nil, fmt.Errorf("failed to parse path %s", req.URL.Path)
+	}
+	resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")])
+	if err != nil {
+		return nil, err
+	}
+	networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")])
+	if err != nil {
+		return nil, err
+	}
+	configurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("configurationName")])
+	if err != nil {
+		return nil, err
+	}
+	ruleCollectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ruleCollectionName")])
+	if err != nil {
+		return nil, err
+	}
+	ruleNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ruleName")])
+	if err != nil {
+		return nil, err
+	}
+	respr, errRespr := a.srv.Get(req.Context(), resourceGroupNameParam, networkManagerNameParam, configurationNameParam, ruleCollectionNameParam, ruleNameParam, nil)
+	if respErr := server.GetError(errRespr, req); respErr != nil {
+		return nil, respErr
+	}
+	respContent := server.GetResponseContent(respr)
+	if !contains([]int{http.StatusOK}, respContent.HTTPStatus) {
+		return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).BaseAdminRuleClassification, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (a *AdminRulesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if a.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := a.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/securityAdminConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ruleCollections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/rules` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + configurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("configurationName")]) + if err != nil { + return nil, err + } + ruleCollectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ruleCollectionName")]) + if err != nil { + return nil, err + } + topUnescaped, err := url.QueryUnescape(qp.Get("$top")) + if err != nil { + return nil, err + } + topParam, err := parseOptional(topUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + skipTokenUnescaped, err := url.QueryUnescape(qp.Get("$skipToken")) + if err != nil { + return nil, err + } + skipTokenParam := getOptional(skipTokenUnescaped) + var options *armnetwork.AdminRulesClientListOptions + if topParam != nil || skipTokenParam != nil { + options = &armnetwork.AdminRulesClientListOptions{ + Top: topParam, + SkipToken: skipTokenParam, + } + } + resp := a.srv.NewListPager(resourceGroupNameParam, networkManagerNameParam, configurationNameParam, ruleCollectionNameParam, options) + newListPager = &resp + a.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.AdminRulesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + a.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + a.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/applicationgatewayprivateendpointconnections_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/applicationgatewayprivateendpointconnections_server.go new file mode 100644 index 00000000000..fd4f06f67c9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/applicationgatewayprivateendpointconnections_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ApplicationGatewayPrivateEndpointConnectionsServer is a fake server for instances of the armnetwork.ApplicationGatewayPrivateEndpointConnectionsClient type. +type ApplicationGatewayPrivateEndpointConnectionsServer struct { + // BeginDelete is the fake for method ApplicationGatewayPrivateEndpointConnectionsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, applicationGatewayName string, connectionName string, options *armnetwork.ApplicationGatewayPrivateEndpointConnectionsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.ApplicationGatewayPrivateEndpointConnectionsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method ApplicationGatewayPrivateEndpointConnectionsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, applicationGatewayName string, connectionName string, options *armnetwork.ApplicationGatewayPrivateEndpointConnectionsClientGetOptions) (resp azfake.Responder[armnetwork.ApplicationGatewayPrivateEndpointConnectionsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method ApplicationGatewayPrivateEndpointConnectionsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, applicationGatewayName string, options *armnetwork.ApplicationGatewayPrivateEndpointConnectionsClientListOptions) (resp azfake.PagerResponder[armnetwork.ApplicationGatewayPrivateEndpointConnectionsClientListResponse]) + + // BeginUpdate is the fake for method ApplicationGatewayPrivateEndpointConnectionsClient.BeginUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginUpdate func(ctx context.Context, resourceGroupName string, applicationGatewayName string, connectionName string, parameters armnetwork.ApplicationGatewayPrivateEndpointConnection, options 
*armnetwork.ApplicationGatewayPrivateEndpointConnectionsClientBeginUpdateOptions) (resp azfake.PollerResponder[armnetwork.ApplicationGatewayPrivateEndpointConnectionsClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewApplicationGatewayPrivateEndpointConnectionsServerTransport creates a new instance of ApplicationGatewayPrivateEndpointConnectionsServerTransport with the provided implementation. +// The returned ApplicationGatewayPrivateEndpointConnectionsServerTransport instance is connected to an instance of armnetwork.ApplicationGatewayPrivateEndpointConnectionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewApplicationGatewayPrivateEndpointConnectionsServerTransport(srv *ApplicationGatewayPrivateEndpointConnectionsServer) *ApplicationGatewayPrivateEndpointConnectionsServerTransport { + return &ApplicationGatewayPrivateEndpointConnectionsServerTransport{ + srv: srv, + beginDelete: newTracker[azfake.PollerResponder[armnetwork.ApplicationGatewayPrivateEndpointConnectionsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.ApplicationGatewayPrivateEndpointConnectionsClientListResponse]](), + beginUpdate: newTracker[azfake.PollerResponder[armnetwork.ApplicationGatewayPrivateEndpointConnectionsClientUpdateResponse]](), + } +} + +// ApplicationGatewayPrivateEndpointConnectionsServerTransport connects instances of armnetwork.ApplicationGatewayPrivateEndpointConnectionsClient to instances of ApplicationGatewayPrivateEndpointConnectionsServer. +// Don't use this type directly, use NewApplicationGatewayPrivateEndpointConnectionsServerTransport instead. +type ApplicationGatewayPrivateEndpointConnectionsServerTransport struct { + srv *ApplicationGatewayPrivateEndpointConnectionsServer + beginDelete *tracker[azfake.PollerResponder[armnetwork.ApplicationGatewayPrivateEndpointConnectionsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.ApplicationGatewayPrivateEndpointConnectionsClientListResponse]] + beginUpdate *tracker[azfake.PollerResponder[armnetwork.ApplicationGatewayPrivateEndpointConnectionsClientUpdateResponse]] +} + +// Do implements the policy.Transporter interface for ApplicationGatewayPrivateEndpointConnectionsServerTransport. 
+func (a *ApplicationGatewayPrivateEndpointConnectionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ApplicationGatewayPrivateEndpointConnectionsClient.BeginDelete": + resp, err = a.dispatchBeginDelete(req) + case "ApplicationGatewayPrivateEndpointConnectionsClient.Get": + resp, err = a.dispatchGet(req) + case "ApplicationGatewayPrivateEndpointConnectionsClient.NewListPager": + resp, err = a.dispatchNewListPager(req) + case "ApplicationGatewayPrivateEndpointConnectionsClient.BeginUpdate": + resp, err = a.dispatchBeginUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (a *ApplicationGatewayPrivateEndpointConnectionsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if a.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := a.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/privateEndpointConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + applicationGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("applicationGatewayName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.BeginDelete(req.Context(), resourceGroupNameParam, applicationGatewayNameParam, connectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + a.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + a.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + a.beginDelete.remove(req) + } + + return resp, nil +} + +func (a *ApplicationGatewayPrivateEndpointConnectionsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if a.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/privateEndpointConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + applicationGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("applicationGatewayName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.Get(req.Context(), resourceGroupNameParam, applicationGatewayNameParam, connectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ApplicationGatewayPrivateEndpointConnection, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (a *ApplicationGatewayPrivateEndpointConnectionsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if a.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := a.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/privateEndpointConnections` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + applicationGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("applicationGatewayName")]) + if err != nil { + return nil, err + } + resp := a.srv.NewListPager(resourceGroupNameParam, applicationGatewayNameParam, nil) + newListPager = &resp + a.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.ApplicationGatewayPrivateEndpointConnectionsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if 
!contains([]int{http.StatusOK}, resp.StatusCode) { + a.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + a.newListPager.remove(req) + } + return resp, nil +} + +func (a *ApplicationGatewayPrivateEndpointConnectionsServerTransport) dispatchBeginUpdate(req *http.Request) (*http.Response, error) { + if a.srv.BeginUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdate not implemented")} + } + beginUpdate := a.beginUpdate.get(req) + if beginUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/privateEndpointConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.ApplicationGatewayPrivateEndpointConnection](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + applicationGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("applicationGatewayName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.BeginUpdate(req.Context(), resourceGroupNameParam, applicationGatewayNameParam, connectionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdate = &respr + a.beginUpdate.add(req, beginUpdate) + } + + resp, err := server.PollerResponderNext(beginUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + a.beginUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdate) { + a.beginUpdate.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/applicationgatewayprivatelinkresources_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/applicationgatewayprivatelinkresources_server.go new file mode 100644 index 00000000000..d4f6e5785ad --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/applicationgatewayprivatelinkresources_server.go @@ -0,0 +1,112 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ApplicationGatewayPrivateLinkResourcesServer is a fake server for instances of the armnetwork.ApplicationGatewayPrivateLinkResourcesClient type. +type ApplicationGatewayPrivateLinkResourcesServer struct { + // NewListPager is the fake for method ApplicationGatewayPrivateLinkResourcesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, applicationGatewayName string, options *armnetwork.ApplicationGatewayPrivateLinkResourcesClientListOptions) (resp azfake.PagerResponder[armnetwork.ApplicationGatewayPrivateLinkResourcesClientListResponse]) +} + +// NewApplicationGatewayPrivateLinkResourcesServerTransport creates a new instance of ApplicationGatewayPrivateLinkResourcesServerTransport with the provided implementation. +// The returned ApplicationGatewayPrivateLinkResourcesServerTransport instance is connected to an instance of armnetwork.ApplicationGatewayPrivateLinkResourcesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewApplicationGatewayPrivateLinkResourcesServerTransport(srv *ApplicationGatewayPrivateLinkResourcesServer) *ApplicationGatewayPrivateLinkResourcesServerTransport { + return &ApplicationGatewayPrivateLinkResourcesServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.ApplicationGatewayPrivateLinkResourcesClientListResponse]](), + } +} + +// ApplicationGatewayPrivateLinkResourcesServerTransport connects instances of armnetwork.ApplicationGatewayPrivateLinkResourcesClient to instances of ApplicationGatewayPrivateLinkResourcesServer. +// Don't use this type directly, use NewApplicationGatewayPrivateLinkResourcesServerTransport instead. +type ApplicationGatewayPrivateLinkResourcesServerTransport struct { + srv *ApplicationGatewayPrivateLinkResourcesServer + newListPager *tracker[azfake.PagerResponder[armnetwork.ApplicationGatewayPrivateLinkResourcesClientListResponse]] +} + +// Do implements the policy.Transporter interface for ApplicationGatewayPrivateLinkResourcesServerTransport. 
+func (a *ApplicationGatewayPrivateLinkResourcesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ApplicationGatewayPrivateLinkResourcesClient.NewListPager": + resp, err = a.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (a *ApplicationGatewayPrivateLinkResourcesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if a.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := a.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/privateLinkResources` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + applicationGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("applicationGatewayName")]) + if err != nil { + return nil, err + } + resp := a.srv.NewListPager(resourceGroupNameParam, applicationGatewayNameParam, nil) + newListPager = &resp + a.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.ApplicationGatewayPrivateLinkResourcesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + a.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + a.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/applicationgateways_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/applicationgateways_server.go new file mode 100644 index 00000000000..ae13cb76110 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/applicationgateways_server.go @@ -0,0 +1,807 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ApplicationGatewaysServer is a fake server for instances of the armnetwork.ApplicationGatewaysClient type. +type ApplicationGatewaysServer struct { + // BeginBackendHealth is the fake for method ApplicationGatewaysClient.BeginBackendHealth + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginBackendHealth func(ctx context.Context, resourceGroupName string, applicationGatewayName string, options *armnetwork.ApplicationGatewaysClientBeginBackendHealthOptions) (resp azfake.PollerResponder[armnetwork.ApplicationGatewaysClientBackendHealthResponse], errResp azfake.ErrorResponder) + + // BeginBackendHealthOnDemand is the fake for method ApplicationGatewaysClient.BeginBackendHealthOnDemand + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginBackendHealthOnDemand func(ctx context.Context, resourceGroupName string, applicationGatewayName string, probeRequest armnetwork.ApplicationGatewayOnDemandProbe, options *armnetwork.ApplicationGatewaysClientBeginBackendHealthOnDemandOptions) (resp azfake.PollerResponder[armnetwork.ApplicationGatewaysClientBackendHealthOnDemandResponse], errResp azfake.ErrorResponder) + + // BeginCreateOrUpdate is the fake for method ApplicationGatewaysClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, applicationGatewayName string, parameters armnetwork.ApplicationGateway, options *armnetwork.ApplicationGatewaysClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.ApplicationGatewaysClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method ApplicationGatewaysClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, applicationGatewayName string, options *armnetwork.ApplicationGatewaysClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.ApplicationGatewaysClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method ApplicationGatewaysClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, applicationGatewayName string, options *armnetwork.ApplicationGatewaysClientGetOptions) (resp azfake.Responder[armnetwork.ApplicationGatewaysClientGetResponse], errResp azfake.ErrorResponder) + + // GetSSLPredefinedPolicy is the fake for method ApplicationGatewaysClient.GetSSLPredefinedPolicy + // HTTP status codes to indicate success: http.StatusOK + GetSSLPredefinedPolicy func(ctx context.Context, predefinedPolicyName string, options *armnetwork.ApplicationGatewaysClientGetSSLPredefinedPolicyOptions) (resp azfake.Responder[armnetwork.ApplicationGatewaysClientGetSSLPredefinedPolicyResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method ApplicationGatewaysClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager 
func(resourceGroupName string, options *armnetwork.ApplicationGatewaysClientListOptions) (resp azfake.PagerResponder[armnetwork.ApplicationGatewaysClientListResponse]) + + // NewListAllPager is the fake for method ApplicationGatewaysClient.NewListAllPager + // HTTP status codes to indicate success: http.StatusOK + NewListAllPager func(options *armnetwork.ApplicationGatewaysClientListAllOptions) (resp azfake.PagerResponder[armnetwork.ApplicationGatewaysClientListAllResponse]) + + // ListAvailableRequestHeaders is the fake for method ApplicationGatewaysClient.ListAvailableRequestHeaders + // HTTP status codes to indicate success: http.StatusOK + ListAvailableRequestHeaders func(ctx context.Context, options *armnetwork.ApplicationGatewaysClientListAvailableRequestHeadersOptions) (resp azfake.Responder[armnetwork.ApplicationGatewaysClientListAvailableRequestHeadersResponse], errResp azfake.ErrorResponder) + + // ListAvailableResponseHeaders is the fake for method ApplicationGatewaysClient.ListAvailableResponseHeaders + // HTTP status codes to indicate success: http.StatusOK + ListAvailableResponseHeaders func(ctx context.Context, options *armnetwork.ApplicationGatewaysClientListAvailableResponseHeadersOptions) (resp azfake.Responder[armnetwork.ApplicationGatewaysClientListAvailableResponseHeadersResponse], errResp azfake.ErrorResponder) + + // ListAvailableSSLOptions is the fake for method ApplicationGatewaysClient.ListAvailableSSLOptions + // HTTP status codes to indicate success: http.StatusOK + ListAvailableSSLOptions func(ctx context.Context, options *armnetwork.ApplicationGatewaysClientListAvailableSSLOptionsOptions) (resp azfake.Responder[armnetwork.ApplicationGatewaysClientListAvailableSSLOptionsResponse], errResp azfake.ErrorResponder) + + // NewListAvailableSSLPredefinedPoliciesPager is the fake for method ApplicationGatewaysClient.NewListAvailableSSLPredefinedPoliciesPager + // HTTP status codes to indicate success: http.StatusOK + NewListAvailableSSLPredefinedPoliciesPager func(options *armnetwork.ApplicationGatewaysClientListAvailableSSLPredefinedPoliciesOptions) (resp azfake.PagerResponder[armnetwork.ApplicationGatewaysClientListAvailableSSLPredefinedPoliciesResponse]) + + // ListAvailableServerVariables is the fake for method ApplicationGatewaysClient.ListAvailableServerVariables + // HTTP status codes to indicate success: http.StatusOK + ListAvailableServerVariables func(ctx context.Context, options *armnetwork.ApplicationGatewaysClientListAvailableServerVariablesOptions) (resp azfake.Responder[armnetwork.ApplicationGatewaysClientListAvailableServerVariablesResponse], errResp azfake.ErrorResponder) + + // ListAvailableWafRuleSets is the fake for method ApplicationGatewaysClient.ListAvailableWafRuleSets + // HTTP status codes to indicate success: http.StatusOK + ListAvailableWafRuleSets func(ctx context.Context, options *armnetwork.ApplicationGatewaysClientListAvailableWafRuleSetsOptions) (resp azfake.Responder[armnetwork.ApplicationGatewaysClientListAvailableWafRuleSetsResponse], errResp azfake.ErrorResponder) + + // BeginStart is the fake for method ApplicationGatewaysClient.BeginStart + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginStart func(ctx context.Context, resourceGroupName string, applicationGatewayName string, options *armnetwork.ApplicationGatewaysClientBeginStartOptions) (resp azfake.PollerResponder[armnetwork.ApplicationGatewaysClientStartResponse], errResp azfake.ErrorResponder) + + // BeginStop is the fake for method 
ApplicationGatewaysClient.BeginStop + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginStop func(ctx context.Context, resourceGroupName string, applicationGatewayName string, options *armnetwork.ApplicationGatewaysClientBeginStopOptions) (resp azfake.PollerResponder[armnetwork.ApplicationGatewaysClientStopResponse], errResp azfake.ErrorResponder) + + // UpdateTags is the fake for method ApplicationGatewaysClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, applicationGatewayName string, parameters armnetwork.TagsObject, options *armnetwork.ApplicationGatewaysClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.ApplicationGatewaysClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewApplicationGatewaysServerTransport creates a new instance of ApplicationGatewaysServerTransport with the provided implementation. +// The returned ApplicationGatewaysServerTransport instance is connected to an instance of armnetwork.ApplicationGatewaysClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewApplicationGatewaysServerTransport(srv *ApplicationGatewaysServer) *ApplicationGatewaysServerTransport { + return &ApplicationGatewaysServerTransport{ + srv: srv, + beginBackendHealth: newTracker[azfake.PollerResponder[armnetwork.ApplicationGatewaysClientBackendHealthResponse]](), + beginBackendHealthOnDemand: newTracker[azfake.PollerResponder[armnetwork.ApplicationGatewaysClientBackendHealthOnDemandResponse]](), + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.ApplicationGatewaysClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.ApplicationGatewaysClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.ApplicationGatewaysClientListResponse]](), + newListAllPager: newTracker[azfake.PagerResponder[armnetwork.ApplicationGatewaysClientListAllResponse]](), + newListAvailableSSLPredefinedPoliciesPager: newTracker[azfake.PagerResponder[armnetwork.ApplicationGatewaysClientListAvailableSSLPredefinedPoliciesResponse]](), + beginStart: newTracker[azfake.PollerResponder[armnetwork.ApplicationGatewaysClientStartResponse]](), + beginStop: newTracker[azfake.PollerResponder[armnetwork.ApplicationGatewaysClientStopResponse]](), + } +} + +// ApplicationGatewaysServerTransport connects instances of armnetwork.ApplicationGatewaysClient to instances of ApplicationGatewaysServer. +// Don't use this type directly, use NewApplicationGatewaysServerTransport instead. 
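Before the transport struct below, the same wiring applies to the long-running-operation fakes (BeginCreateOrUpdate, BeginDelete, BeginStart, BeginStop, ...): the PollerResponder decides how many intermediate polls the client observes before the terminal response. A hedged, test-side sketch under the same assumptions as the earlier pager example (placeholder names; azfake helper methods assumed from sdk/azcore/fake):

package fake_test // hypothetical; this would live in a test, not in the vendored fake package

import (
	"context"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func demoBeginCreateOrUpdateFake() {
	srv := fake.ApplicationGatewaysServer{
		BeginCreateOrUpdate: func(ctx context.Context, resourceGroupName, applicationGatewayName string, parameters armnetwork.ApplicationGateway, options *armnetwork.ApplicationGatewaysClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.ApplicationGatewaysClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) {
			// One intermediate 201 poll, then a terminal 200 echoing the gateway that was sent in.
			resp.AddNonTerminalResponse(http.StatusCreated, nil)
			resp.SetTerminalResponse(http.StatusOK, armnetwork.ApplicationGatewaysClientCreateOrUpdateResponse{ApplicationGateway: parameters}, nil)
			return
		},
	}

	client, err := armnetwork.NewApplicationGatewaysClient("subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transporter: fake.NewApplicationGatewaysServerTransport(&srv)},
	})
	if err != nil {
		log.Fatal(err)
	}

	poller, err := client.BeginCreateOrUpdate(context.Background(), "rg", "gateway", armnetwork.ApplicationGateway{}, nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := poller.PollUntilDone(context.Background(), nil); err != nil {
		log.Fatal(err)
	}
}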
+type ApplicationGatewaysServerTransport struct { + srv *ApplicationGatewaysServer + beginBackendHealth *tracker[azfake.PollerResponder[armnetwork.ApplicationGatewaysClientBackendHealthResponse]] + beginBackendHealthOnDemand *tracker[azfake.PollerResponder[armnetwork.ApplicationGatewaysClientBackendHealthOnDemandResponse]] + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.ApplicationGatewaysClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.ApplicationGatewaysClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.ApplicationGatewaysClientListResponse]] + newListAllPager *tracker[azfake.PagerResponder[armnetwork.ApplicationGatewaysClientListAllResponse]] + newListAvailableSSLPredefinedPoliciesPager *tracker[azfake.PagerResponder[armnetwork.ApplicationGatewaysClientListAvailableSSLPredefinedPoliciesResponse]] + beginStart *tracker[azfake.PollerResponder[armnetwork.ApplicationGatewaysClientStartResponse]] + beginStop *tracker[azfake.PollerResponder[armnetwork.ApplicationGatewaysClientStopResponse]] +} + +// Do implements the policy.Transporter interface for ApplicationGatewaysServerTransport. +func (a *ApplicationGatewaysServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ApplicationGatewaysClient.BeginBackendHealth": + resp, err = a.dispatchBeginBackendHealth(req) + case "ApplicationGatewaysClient.BeginBackendHealthOnDemand": + resp, err = a.dispatchBeginBackendHealthOnDemand(req) + case "ApplicationGatewaysClient.BeginCreateOrUpdate": + resp, err = a.dispatchBeginCreateOrUpdate(req) + case "ApplicationGatewaysClient.BeginDelete": + resp, err = a.dispatchBeginDelete(req) + case "ApplicationGatewaysClient.Get": + resp, err = a.dispatchGet(req) + case "ApplicationGatewaysClient.GetSSLPredefinedPolicy": + resp, err = a.dispatchGetSSLPredefinedPolicy(req) + case "ApplicationGatewaysClient.NewListPager": + resp, err = a.dispatchNewListPager(req) + case "ApplicationGatewaysClient.NewListAllPager": + resp, err = a.dispatchNewListAllPager(req) + case "ApplicationGatewaysClient.ListAvailableRequestHeaders": + resp, err = a.dispatchListAvailableRequestHeaders(req) + case "ApplicationGatewaysClient.ListAvailableResponseHeaders": + resp, err = a.dispatchListAvailableResponseHeaders(req) + case "ApplicationGatewaysClient.ListAvailableSSLOptions": + resp, err = a.dispatchListAvailableSSLOptions(req) + case "ApplicationGatewaysClient.NewListAvailableSSLPredefinedPoliciesPager": + resp, err = a.dispatchNewListAvailableSSLPredefinedPoliciesPager(req) + case "ApplicationGatewaysClient.ListAvailableServerVariables": + resp, err = a.dispatchListAvailableServerVariables(req) + case "ApplicationGatewaysClient.ListAvailableWafRuleSets": + resp, err = a.dispatchListAvailableWafRuleSets(req) + case "ApplicationGatewaysClient.BeginStart": + resp, err = a.dispatchBeginStart(req) + case "ApplicationGatewaysClient.BeginStop": + resp, err = a.dispatchBeginStop(req) + case "ApplicationGatewaysClient.UpdateTags": + resp, err = a.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (a *ApplicationGatewaysServerTransport) 
dispatchBeginBackendHealth(req *http.Request) (*http.Response, error) { + if a.srv.BeginBackendHealth == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginBackendHealth not implemented")} + } + beginBackendHealth := a.beginBackendHealth.get(req) + if beginBackendHealth == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/backendhealth` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + applicationGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("applicationGatewayName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.ApplicationGatewaysClientBeginBackendHealthOptions + if expandParam != nil { + options = &armnetwork.ApplicationGatewaysClientBeginBackendHealthOptions{ + Expand: expandParam, + } + } + respr, errRespr := a.srv.BeginBackendHealth(req.Context(), resourceGroupNameParam, applicationGatewayNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginBackendHealth = &respr + a.beginBackendHealth.add(req, beginBackendHealth) + } + + resp, err := server.PollerResponderNext(beginBackendHealth, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + a.beginBackendHealth.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginBackendHealth) { + a.beginBackendHealth.remove(req) + } + + return resp, nil +} + +func (a *ApplicationGatewaysServerTransport) dispatchBeginBackendHealthOnDemand(req *http.Request) (*http.Response, error) { + if a.srv.BeginBackendHealthOnDemand == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginBackendHealthOnDemand not implemented")} + } + beginBackendHealthOnDemand := a.beginBackendHealthOnDemand.get(req) + if beginBackendHealthOnDemand == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/getBackendHealthOnDemand` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + body, err := server.UnmarshalRequestAsJSON[armnetwork.ApplicationGatewayOnDemandProbe](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + applicationGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("applicationGatewayName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.ApplicationGatewaysClientBeginBackendHealthOnDemandOptions + if expandParam != nil { + options = &armnetwork.ApplicationGatewaysClientBeginBackendHealthOnDemandOptions{ + Expand: expandParam, + } + } + respr, errRespr := a.srv.BeginBackendHealthOnDemand(req.Context(), resourceGroupNameParam, applicationGatewayNameParam, body, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginBackendHealthOnDemand = &respr + a.beginBackendHealthOnDemand.add(req, beginBackendHealthOnDemand) + } + + resp, err := server.PollerResponderNext(beginBackendHealthOnDemand, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + a.beginBackendHealthOnDemand.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginBackendHealthOnDemand) { + a.beginBackendHealthOnDemand.remove(req) + } + + return resp, nil +} + +func (a *ApplicationGatewaysServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if a.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := a.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.ApplicationGateway](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + applicationGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("applicationGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, applicationGatewayNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + a.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + a.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + a.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (a *ApplicationGatewaysServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if a.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := a.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + applicationGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("applicationGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.BeginDelete(req.Context(), resourceGroupNameParam, applicationGatewayNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + a.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + a.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + a.beginDelete.remove(req) + } + + return resp, nil +} + +func (a *ApplicationGatewaysServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if a.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + applicationGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("applicationGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.Get(req.Context(), resourceGroupNameParam, applicationGatewayNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ApplicationGateway, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (a *ApplicationGatewaysServerTransport) dispatchGetSSLPredefinedPolicy(req *http.Request) (*http.Response, error) { + if a.srv.GetSSLPredefinedPolicy == nil { + return nil, &nonRetriableError{errors.New("fake for method GetSSLPredefinedPolicy not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationGatewayAvailableSslOptions/default/predefinedPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + predefinedPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("predefinedPolicyName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.GetSSLPredefinedPolicy(req.Context(), predefinedPolicyNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ApplicationGatewaySSLPredefinedPolicy, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (a *ApplicationGatewaysServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if a.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := a.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationGateways` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := a.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + a.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.ApplicationGatewaysClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + a.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + a.newListPager.remove(req) + } + return resp, nil +} + +func (a *ApplicationGatewaysServerTransport) dispatchNewListAllPager(req *http.Request) (*http.Response, error) { + if a.srv.NewListAllPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAllPager not implemented")} + } + newListAllPager := a.newListAllPager.get(req) + if newListAllPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationGateways` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := a.srv.NewListAllPager(nil) + newListAllPager = &resp + a.newListAllPager.add(req, newListAllPager) + server.PagerResponderInjectNextLinks(newListAllPager, req, func(page *armnetwork.ApplicationGatewaysClientListAllResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAllPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + a.newListAllPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAllPager) { + a.newListAllPager.remove(req) + } + return resp, nil +} + +func (a *ApplicationGatewaysServerTransport) dispatchListAvailableRequestHeaders(req *http.Request) (*http.Response, error) { + if a.srv.ListAvailableRequestHeaders == nil { + return nil, &nonRetriableError{errors.New("fake for method ListAvailableRequestHeaders not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationGatewayAvailableRequestHeaders` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + respr, errRespr := a.srv.ListAvailableRequestHeaders(req.Context(), nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).StringArray, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (a *ApplicationGatewaysServerTransport) dispatchListAvailableResponseHeaders(req *http.Request) (*http.Response, error) { + if a.srv.ListAvailableResponseHeaders == nil { + return nil, &nonRetriableError{errors.New("fake for method ListAvailableResponseHeaders not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationGatewayAvailableResponseHeaders` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + respr, errRespr := a.srv.ListAvailableResponseHeaders(req.Context(), nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).StringArray, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (a *ApplicationGatewaysServerTransport) dispatchListAvailableSSLOptions(req *http.Request) (*http.Response, error) { + if a.srv.ListAvailableSSLOptions == nil { + return nil, &nonRetriableError{errors.New("fake for method ListAvailableSSLOptions not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationGatewayAvailableSslOptions/default` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + respr, errRespr := a.srv.ListAvailableSSLOptions(req.Context(), nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ApplicationGatewayAvailableSSLOptions, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (a *ApplicationGatewaysServerTransport) dispatchNewListAvailableSSLPredefinedPoliciesPager(req *http.Request) (*http.Response, error) { + if a.srv.NewListAvailableSSLPredefinedPoliciesPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAvailableSSLPredefinedPoliciesPager not implemented")} + } + newListAvailableSSLPredefinedPoliciesPager := a.newListAvailableSSLPredefinedPoliciesPager.get(req) + if newListAvailableSSLPredefinedPoliciesPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationGatewayAvailableSslOptions/default/predefinedPolicies` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := a.srv.NewListAvailableSSLPredefinedPoliciesPager(nil) + newListAvailableSSLPredefinedPoliciesPager = &resp + a.newListAvailableSSLPredefinedPoliciesPager.add(req, newListAvailableSSLPredefinedPoliciesPager) + server.PagerResponderInjectNextLinks(newListAvailableSSLPredefinedPoliciesPager, req, func(page *armnetwork.ApplicationGatewaysClientListAvailableSSLPredefinedPoliciesResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAvailableSSLPredefinedPoliciesPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + a.newListAvailableSSLPredefinedPoliciesPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAvailableSSLPredefinedPoliciesPager) { + a.newListAvailableSSLPredefinedPoliciesPager.remove(req) + } + return resp, nil +} + +func (a *ApplicationGatewaysServerTransport) dispatchListAvailableServerVariables(req *http.Request) (*http.Response, error) { + if a.srv.ListAvailableServerVariables == nil { + return nil, &nonRetriableError{errors.New("fake for method ListAvailableServerVariables not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationGatewayAvailableServerVariables` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + respr, errRespr := a.srv.ListAvailableServerVariables(req.Context(), nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).StringArray, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (a *ApplicationGatewaysServerTransport) dispatchListAvailableWafRuleSets(req *http.Request) (*http.Response, error) { + if a.srv.ListAvailableWafRuleSets == nil { + return nil, &nonRetriableError{errors.New("fake for method ListAvailableWafRuleSets not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationGatewayAvailableWafRuleSets` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + respr, errRespr := a.srv.ListAvailableWafRuleSets(req.Context(), nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ApplicationGatewayAvailableWafRuleSetsResult, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (a *ApplicationGatewaysServerTransport) dispatchBeginStart(req *http.Request) (*http.Response, error) { + if a.srv.BeginStart == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginStart not implemented")} + } + beginStart := a.beginStart.get(req) + if beginStart == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/start` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + applicationGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("applicationGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.BeginStart(req.Context(), resourceGroupNameParam, applicationGatewayNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginStart = &respr + a.beginStart.add(req, beginStart) + } + + resp, err := server.PollerResponderNext(beginStart, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + a.beginStart.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginStart) { + a.beginStart.remove(req) + } + + return resp, nil +} + +func (a *ApplicationGatewaysServerTransport) dispatchBeginStop(req *http.Request) (*http.Response, error) { + if a.srv.BeginStop == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginStop not implemented")} + } + beginStop := a.beginStop.get(req) + if beginStop == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/stop` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + applicationGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("applicationGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.BeginStop(req.Context(), resourceGroupNameParam, applicationGatewayNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginStop = &respr + a.beginStop.add(req, beginStop) + } + + resp, err := server.PollerResponderNext(beginStop, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + a.beginStop.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginStop) { + a.beginStop.remove(req) + } + + return resp, nil +} + +func (a *ApplicationGatewaysServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if a.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + applicationGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("applicationGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.UpdateTags(req.Context(), resourceGroupNameParam, applicationGatewayNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ApplicationGateway, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/applicationgatewaywafdynamicmanifests_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/applicationgatewaywafdynamicmanifests_server.go new file mode 100644 index 00000000000..9f8a136fb25 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/applicationgatewaywafdynamicmanifests_server.go @@ -0,0 +1,108 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ApplicationGatewayWafDynamicManifestsServer is a fake server for instances of the armnetwork.ApplicationGatewayWafDynamicManifestsClient type. +type ApplicationGatewayWafDynamicManifestsServer struct { + // NewGetPager is the fake for method ApplicationGatewayWafDynamicManifestsClient.NewGetPager + // HTTP status codes to indicate success: http.StatusOK + NewGetPager func(location string, options *armnetwork.ApplicationGatewayWafDynamicManifestsClientGetOptions) (resp azfake.PagerResponder[armnetwork.ApplicationGatewayWafDynamicManifestsClientGetResponse]) +} + +// NewApplicationGatewayWafDynamicManifestsServerTransport creates a new instance of ApplicationGatewayWafDynamicManifestsServerTransport with the provided implementation. +// The returned ApplicationGatewayWafDynamicManifestsServerTransport instance is connected to an instance of armnetwork.ApplicationGatewayWafDynamicManifestsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewApplicationGatewayWafDynamicManifestsServerTransport(srv *ApplicationGatewayWafDynamicManifestsServer) *ApplicationGatewayWafDynamicManifestsServerTransport { + return &ApplicationGatewayWafDynamicManifestsServerTransport{ + srv: srv, + newGetPager: newTracker[azfake.PagerResponder[armnetwork.ApplicationGatewayWafDynamicManifestsClientGetResponse]](), + } +} + +// ApplicationGatewayWafDynamicManifestsServerTransport connects instances of armnetwork.ApplicationGatewayWafDynamicManifestsClient to instances of ApplicationGatewayWafDynamicManifestsServer. +// Don't use this type directly, use NewApplicationGatewayWafDynamicManifestsServerTransport instead. +type ApplicationGatewayWafDynamicManifestsServerTransport struct { + srv *ApplicationGatewayWafDynamicManifestsServer + newGetPager *tracker[azfake.PagerResponder[armnetwork.ApplicationGatewayWafDynamicManifestsClientGetResponse]] +} + +// Do implements the policy.Transporter interface for ApplicationGatewayWafDynamicManifestsServerTransport. 
+func (a *ApplicationGatewayWafDynamicManifestsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ApplicationGatewayWafDynamicManifestsClient.NewGetPager": + resp, err = a.dispatchNewGetPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (a *ApplicationGatewayWafDynamicManifestsServerTransport) dispatchNewGetPager(req *http.Request) (*http.Response, error) { + if a.srv.NewGetPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewGetPager not implemented")} + } + newGetPager := a.newGetPager.get(req) + if newGetPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/applicationGatewayWafDynamicManifests` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + resp := a.srv.NewGetPager(locationParam, nil) + newGetPager = &resp + a.newGetPager.add(req, newGetPager) + server.PagerResponderInjectNextLinks(newGetPager, req, func(page *armnetwork.ApplicationGatewayWafDynamicManifestsClientGetResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newGetPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + a.newGetPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newGetPager) { + a.newGetPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/applicationgatewaywafdynamicmanifestsdefault_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/applicationgatewaywafdynamicmanifestsdefault_server.go new file mode 100644 index 00000000000..e9179b72c8e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/applicationgatewaywafdynamicmanifestsdefault_server.go @@ -0,0 +1,96 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ApplicationGatewayWafDynamicManifestsDefaultServer is a fake server for instances of the armnetwork.ApplicationGatewayWafDynamicManifestsDefaultClient type. +type ApplicationGatewayWafDynamicManifestsDefaultServer struct { + // Get is the fake for method ApplicationGatewayWafDynamicManifestsDefaultClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, location string, options *armnetwork.ApplicationGatewayWafDynamicManifestsDefaultClientGetOptions) (resp azfake.Responder[armnetwork.ApplicationGatewayWafDynamicManifestsDefaultClientGetResponse], errResp azfake.ErrorResponder) +} + +// NewApplicationGatewayWafDynamicManifestsDefaultServerTransport creates a new instance of ApplicationGatewayWafDynamicManifestsDefaultServerTransport with the provided implementation. +// The returned ApplicationGatewayWafDynamicManifestsDefaultServerTransport instance is connected to an instance of armnetwork.ApplicationGatewayWafDynamicManifestsDefaultClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewApplicationGatewayWafDynamicManifestsDefaultServerTransport(srv *ApplicationGatewayWafDynamicManifestsDefaultServer) *ApplicationGatewayWafDynamicManifestsDefaultServerTransport { + return &ApplicationGatewayWafDynamicManifestsDefaultServerTransport{srv: srv} +} + +// ApplicationGatewayWafDynamicManifestsDefaultServerTransport connects instances of armnetwork.ApplicationGatewayWafDynamicManifestsDefaultClient to instances of ApplicationGatewayWafDynamicManifestsDefaultServer. +// Don't use this type directly, use NewApplicationGatewayWafDynamicManifestsDefaultServerTransport instead. +type ApplicationGatewayWafDynamicManifestsDefaultServerTransport struct { + srv *ApplicationGatewayWafDynamicManifestsDefaultServer +} + +// Do implements the policy.Transporter interface for ApplicationGatewayWafDynamicManifestsDefaultServerTransport. 
+func (a *ApplicationGatewayWafDynamicManifestsDefaultServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ApplicationGatewayWafDynamicManifestsDefaultClient.Get": + resp, err = a.dispatchGet(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (a *ApplicationGatewayWafDynamicManifestsDefaultServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if a.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/applicationGatewayWafDynamicManifests/dafault` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.Get(req.Context(), locationParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ApplicationGatewayWafDynamicManifestResult, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/applicationsecuritygroups_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/applicationsecuritygroups_server.go new file mode 100644 index 00000000000..6e19cf53ed1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/applicationsecuritygroups_server.go @@ -0,0 +1,340 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ApplicationSecurityGroupsServer is a fake server for instances of the armnetwork.ApplicationSecurityGroupsClient type. 
+type ApplicationSecurityGroupsServer struct { + // BeginCreateOrUpdate is the fake for method ApplicationSecurityGroupsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, applicationSecurityGroupName string, parameters armnetwork.ApplicationSecurityGroup, options *armnetwork.ApplicationSecurityGroupsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.ApplicationSecurityGroupsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method ApplicationSecurityGroupsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, applicationSecurityGroupName string, options *armnetwork.ApplicationSecurityGroupsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.ApplicationSecurityGroupsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method ApplicationSecurityGroupsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, applicationSecurityGroupName string, options *armnetwork.ApplicationSecurityGroupsClientGetOptions) (resp azfake.Responder[armnetwork.ApplicationSecurityGroupsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method ApplicationSecurityGroupsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armnetwork.ApplicationSecurityGroupsClientListOptions) (resp azfake.PagerResponder[armnetwork.ApplicationSecurityGroupsClientListResponse]) + + // NewListAllPager is the fake for method ApplicationSecurityGroupsClient.NewListAllPager + // HTTP status codes to indicate success: http.StatusOK + NewListAllPager func(options *armnetwork.ApplicationSecurityGroupsClientListAllOptions) (resp azfake.PagerResponder[armnetwork.ApplicationSecurityGroupsClientListAllResponse]) + + // UpdateTags is the fake for method ApplicationSecurityGroupsClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, applicationSecurityGroupName string, parameters armnetwork.TagsObject, options *armnetwork.ApplicationSecurityGroupsClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.ApplicationSecurityGroupsClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewApplicationSecurityGroupsServerTransport creates a new instance of ApplicationSecurityGroupsServerTransport with the provided implementation. +// The returned ApplicationSecurityGroupsServerTransport instance is connected to an instance of armnetwork.ApplicationSecurityGroupsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
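Before the constructor below, and as the comment above notes, the transport is injected into the real client through azcore.ClientOptions.Transporter. A hedged, test-side sketch for a plain Responder-backed method such as Get (placeholder subscription and resource names; azfake Responder/TokenCredential helpers assumed from sdk/azcore/fake):

package fake_test // hypothetical; not part of the vendored package

import (
	"context"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func demoApplicationSecurityGroupsGetFake() {
	srv := fake.ApplicationSecurityGroupsServer{
		Get: func(ctx context.Context, resourceGroupName, applicationSecurityGroupName string, options *armnetwork.ApplicationSecurityGroupsClientGetOptions) (resp azfake.Responder[armnetwork.ApplicationSecurityGroupsClientGetResponse], errResp azfake.ErrorResponder) {
			// Echo the requested name back with a 200; errResp stays unset on success.
			resp.SetResponse(http.StatusOK, armnetwork.ApplicationSecurityGroupsClientGetResponse{
				ApplicationSecurityGroup: armnetwork.ApplicationSecurityGroup{Name: to.Ptr(applicationSecurityGroupName)},
			}, nil)
			return
		},
	}

	client, err := armnetwork.NewApplicationSecurityGroupsClient("subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transporter: fake.NewApplicationSecurityGroupsServerTransport(&srv)},
	})
	if err != nil {
		log.Fatal(err)
	}

	got, err := client.Get(context.Background(), "rg", "asg-name", nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(*got.Name)
}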
+func NewApplicationSecurityGroupsServerTransport(srv *ApplicationSecurityGroupsServer) *ApplicationSecurityGroupsServerTransport { + return &ApplicationSecurityGroupsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.ApplicationSecurityGroupsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.ApplicationSecurityGroupsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.ApplicationSecurityGroupsClientListResponse]](), + newListAllPager: newTracker[azfake.PagerResponder[armnetwork.ApplicationSecurityGroupsClientListAllResponse]](), + } +} + +// ApplicationSecurityGroupsServerTransport connects instances of armnetwork.ApplicationSecurityGroupsClient to instances of ApplicationSecurityGroupsServer. +// Don't use this type directly, use NewApplicationSecurityGroupsServerTransport instead. +type ApplicationSecurityGroupsServerTransport struct { + srv *ApplicationSecurityGroupsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.ApplicationSecurityGroupsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.ApplicationSecurityGroupsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.ApplicationSecurityGroupsClientListResponse]] + newListAllPager *tracker[azfake.PagerResponder[armnetwork.ApplicationSecurityGroupsClientListAllResponse]] +} + +// Do implements the policy.Transporter interface for ApplicationSecurityGroupsServerTransport. +func (a *ApplicationSecurityGroupsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ApplicationSecurityGroupsClient.BeginCreateOrUpdate": + resp, err = a.dispatchBeginCreateOrUpdate(req) + case "ApplicationSecurityGroupsClient.BeginDelete": + resp, err = a.dispatchBeginDelete(req) + case "ApplicationSecurityGroupsClient.Get": + resp, err = a.dispatchGet(req) + case "ApplicationSecurityGroupsClient.NewListPager": + resp, err = a.dispatchNewListPager(req) + case "ApplicationSecurityGroupsClient.NewListAllPager": + resp, err = a.dispatchNewListAllPager(req) + case "ApplicationSecurityGroupsClient.UpdateTags": + resp, err = a.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (a *ApplicationSecurityGroupsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if a.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := a.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationSecurityGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.ApplicationSecurityGroup](req) + if err != nil { + return nil, err + } + 
resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + applicationSecurityGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("applicationSecurityGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, applicationSecurityGroupNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + a.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + a.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + a.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (a *ApplicationSecurityGroupsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if a.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := a.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationSecurityGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + applicationSecurityGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("applicationSecurityGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.BeginDelete(req.Context(), resourceGroupNameParam, applicationSecurityGroupNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + a.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + a.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + a.beginDelete.remove(req) + } + + return resp, nil +} + +func (a *ApplicationSecurityGroupsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if a.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationSecurityGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + applicationSecurityGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("applicationSecurityGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.Get(req.Context(), resourceGroupNameParam, applicationSecurityGroupNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ApplicationSecurityGroup, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (a *ApplicationSecurityGroupsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if a.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := a.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationSecurityGroups` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := a.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + a.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.ApplicationSecurityGroupsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + a.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + a.newListPager.remove(req) + } + return resp, nil +} + +func (a *ApplicationSecurityGroupsServerTransport) dispatchNewListAllPager(req *http.Request) (*http.Response, error) { + if a.srv.NewListAllPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAllPager not implemented")} + } + newListAllPager := a.newListAllPager.get(req) + if newListAllPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationSecurityGroups` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := a.srv.NewListAllPager(nil) + newListAllPager = &resp + a.newListAllPager.add(req, newListAllPager) + server.PagerResponderInjectNextLinks(newListAllPager, req, func(page *armnetwork.ApplicationSecurityGroupsClientListAllResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAllPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + a.newListAllPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAllPager) { + a.newListAllPager.remove(req) + } + return resp, nil +} + +func (a *ApplicationSecurityGroupsServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if a.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/applicationSecurityGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + applicationSecurityGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("applicationSecurityGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.UpdateTags(req.Context(), resourceGroupNameParam, applicationSecurityGroupNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ApplicationSecurityGroup, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/availabledelegations_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/availabledelegations_server.go new file mode 100644 index 00000000000..e5360210942 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/availabledelegations_server.go @@ -0,0 +1,108 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// AvailableDelegationsServer is a fake server for instances of the armnetwork.AvailableDelegationsClient type. +type AvailableDelegationsServer struct { + // NewListPager is the fake for method AvailableDelegationsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(location string, options *armnetwork.AvailableDelegationsClientListOptions) (resp azfake.PagerResponder[armnetwork.AvailableDelegationsClientListResponse]) +} + +// NewAvailableDelegationsServerTransport creates a new instance of AvailableDelegationsServerTransport with the provided implementation. +// The returned AvailableDelegationsServerTransport instance is connected to an instance of armnetwork.AvailableDelegationsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewAvailableDelegationsServerTransport(srv *AvailableDelegationsServer) *AvailableDelegationsServerTransport { + return &AvailableDelegationsServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.AvailableDelegationsClientListResponse]](), + } +} + +// AvailableDelegationsServerTransport connects instances of armnetwork.AvailableDelegationsClient to instances of AvailableDelegationsServer. +// Don't use this type directly, use NewAvailableDelegationsServerTransport instead. +type AvailableDelegationsServerTransport struct { + srv *AvailableDelegationsServer + newListPager *tracker[azfake.PagerResponder[armnetwork.AvailableDelegationsClientListResponse]] +} + +// Do implements the policy.Transporter interface for AvailableDelegationsServerTransport. 
+func (a *AvailableDelegationsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "AvailableDelegationsClient.NewListPager": + resp, err = a.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (a *AvailableDelegationsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if a.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := a.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/availableDelegations` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + resp := a.srv.NewListPager(locationParam, nil) + newListPager = &resp + a.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.AvailableDelegationsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + a.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + a.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/availableendpointservices_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/availableendpointservices_server.go new file mode 100644 index 00000000000..e364d5e591b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/availableendpointservices_server.go @@ -0,0 +1,108 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// AvailableEndpointServicesServer is a fake server for instances of the armnetwork.AvailableEndpointServicesClient type. 
+type AvailableEndpointServicesServer struct { + // NewListPager is the fake for method AvailableEndpointServicesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(location string, options *armnetwork.AvailableEndpointServicesClientListOptions) (resp azfake.PagerResponder[armnetwork.AvailableEndpointServicesClientListResponse]) +} + +// NewAvailableEndpointServicesServerTransport creates a new instance of AvailableEndpointServicesServerTransport with the provided implementation. +// The returned AvailableEndpointServicesServerTransport instance is connected to an instance of armnetwork.AvailableEndpointServicesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewAvailableEndpointServicesServerTransport(srv *AvailableEndpointServicesServer) *AvailableEndpointServicesServerTransport { + return &AvailableEndpointServicesServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.AvailableEndpointServicesClientListResponse]](), + } +} + +// AvailableEndpointServicesServerTransport connects instances of armnetwork.AvailableEndpointServicesClient to instances of AvailableEndpointServicesServer. +// Don't use this type directly, use NewAvailableEndpointServicesServerTransport instead. +type AvailableEndpointServicesServerTransport struct { + srv *AvailableEndpointServicesServer + newListPager *tracker[azfake.PagerResponder[armnetwork.AvailableEndpointServicesClientListResponse]] +} + +// Do implements the policy.Transporter interface for AvailableEndpointServicesServerTransport. +func (a *AvailableEndpointServicesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "AvailableEndpointServicesClient.NewListPager": + resp, err = a.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (a *AvailableEndpointServicesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if a.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := a.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualNetworkAvailableEndpointServices` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + resp := a.srv.NewListPager(locationParam, nil) + newListPager = &resp + a.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.AvailableEndpointServicesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + 
a.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + a.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/availableprivateendpointtypes_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/availableprivateendpointtypes_server.go new file mode 100644 index 00000000000..51612ed50bb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/availableprivateendpointtypes_server.go @@ -0,0 +1,157 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// AvailablePrivateEndpointTypesServer is a fake server for instances of the armnetwork.AvailablePrivateEndpointTypesClient type. +type AvailablePrivateEndpointTypesServer struct { + // NewListPager is the fake for method AvailablePrivateEndpointTypesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(location string, options *armnetwork.AvailablePrivateEndpointTypesClientListOptions) (resp azfake.PagerResponder[armnetwork.AvailablePrivateEndpointTypesClientListResponse]) + + // NewListByResourceGroupPager is the fake for method AvailablePrivateEndpointTypesClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(location string, resourceGroupName string, options *armnetwork.AvailablePrivateEndpointTypesClientListByResourceGroupOptions) (resp azfake.PagerResponder[armnetwork.AvailablePrivateEndpointTypesClientListByResourceGroupResponse]) +} + +// NewAvailablePrivateEndpointTypesServerTransport creates a new instance of AvailablePrivateEndpointTypesServerTransport with the provided implementation. +// The returned AvailablePrivateEndpointTypesServerTransport instance is connected to an instance of armnetwork.AvailablePrivateEndpointTypesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewAvailablePrivateEndpointTypesServerTransport(srv *AvailablePrivateEndpointTypesServer) *AvailablePrivateEndpointTypesServerTransport { + return &AvailablePrivateEndpointTypesServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.AvailablePrivateEndpointTypesClientListResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armnetwork.AvailablePrivateEndpointTypesClientListByResourceGroupResponse]](), + } +} + +// AvailablePrivateEndpointTypesServerTransport connects instances of armnetwork.AvailablePrivateEndpointTypesClient to instances of AvailablePrivateEndpointTypesServer. 
+// Don't use this type directly, use NewAvailablePrivateEndpointTypesServerTransport instead. +type AvailablePrivateEndpointTypesServerTransport struct { + srv *AvailablePrivateEndpointTypesServer + newListPager *tracker[azfake.PagerResponder[armnetwork.AvailablePrivateEndpointTypesClientListResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armnetwork.AvailablePrivateEndpointTypesClientListByResourceGroupResponse]] +} + +// Do implements the policy.Transporter interface for AvailablePrivateEndpointTypesServerTransport. +func (a *AvailablePrivateEndpointTypesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "AvailablePrivateEndpointTypesClient.NewListPager": + resp, err = a.dispatchNewListPager(req) + case "AvailablePrivateEndpointTypesClient.NewListByResourceGroupPager": + resp, err = a.dispatchNewListByResourceGroupPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (a *AvailablePrivateEndpointTypesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if a.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := a.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/availablePrivateEndpointTypes` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + resp := a.srv.NewListPager(locationParam, nil) + newListPager = &resp + a.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.AvailablePrivateEndpointTypesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + a.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + a.newListPager.remove(req) + } + return resp, nil +} + +func (a *AvailablePrivateEndpointTypesServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if a.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := a.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/availablePrivateEndpointTypes` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := a.srv.NewListByResourceGroupPager(locationParam, resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + a.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armnetwork.AvailablePrivateEndpointTypesClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + a.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + a.newListByResourceGroupPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/availableresourcegroupdelegations_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/availableresourcegroupdelegations_server.go new file mode 100644 index 00000000000..01389052dac --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/availableresourcegroupdelegations_server.go @@ -0,0 +1,112 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// AvailableResourceGroupDelegationsServer is a fake server for instances of the armnetwork.AvailableResourceGroupDelegationsClient type. +type AvailableResourceGroupDelegationsServer struct { + // NewListPager is the fake for method AvailableResourceGroupDelegationsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(location string, resourceGroupName string, options *armnetwork.AvailableResourceGroupDelegationsClientListOptions) (resp azfake.PagerResponder[armnetwork.AvailableResourceGroupDelegationsClientListResponse]) +} + +// NewAvailableResourceGroupDelegationsServerTransport creates a new instance of AvailableResourceGroupDelegationsServerTransport with the provided implementation. +// The returned AvailableResourceGroupDelegationsServerTransport instance is connected to an instance of armnetwork.AvailableResourceGroupDelegationsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewAvailableResourceGroupDelegationsServerTransport(srv *AvailableResourceGroupDelegationsServer) *AvailableResourceGroupDelegationsServerTransport { + return &AvailableResourceGroupDelegationsServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.AvailableResourceGroupDelegationsClientListResponse]](), + } +} + +// AvailableResourceGroupDelegationsServerTransport connects instances of armnetwork.AvailableResourceGroupDelegationsClient to instances of AvailableResourceGroupDelegationsServer. +// Don't use this type directly, use NewAvailableResourceGroupDelegationsServerTransport instead. +type AvailableResourceGroupDelegationsServerTransport struct { + srv *AvailableResourceGroupDelegationsServer + newListPager *tracker[azfake.PagerResponder[armnetwork.AvailableResourceGroupDelegationsClientListResponse]] +} + +// Do implements the policy.Transporter interface for AvailableResourceGroupDelegationsServerTransport. 
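+//
+// Dispatch below relies on the generated armnetwork client stamping the operation name into
+// the request context, roughly (a sketch of the client-side convention, not code from this file):
+//
+//	ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "AvailableResourceGroupDelegationsClient.NewListPager")
+//
+// A request built outside the generated client carries no such value and is rejected with the
+// CtxAPINameKey error in Do.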
+func (a *AvailableResourceGroupDelegationsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "AvailableResourceGroupDelegationsClient.NewListPager": + resp, err = a.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (a *AvailableResourceGroupDelegationsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if a.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := a.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/availableDelegations` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := a.srv.NewListPager(locationParam, resourceGroupNameParam, nil) + newListPager = &resp + a.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.AvailableResourceGroupDelegationsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + a.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + a.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/availableservicealiases_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/availableservicealiases_server.go new file mode 100644 index 00000000000..dd55194e652 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/availableservicealiases_server.go @@ -0,0 +1,157 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// AvailableServiceAliasesServer is a fake server for instances of the armnetwork.AvailableServiceAliasesClient type. +type AvailableServiceAliasesServer struct { + // NewListPager is the fake for method AvailableServiceAliasesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(location string, options *armnetwork.AvailableServiceAliasesClientListOptions) (resp azfake.PagerResponder[armnetwork.AvailableServiceAliasesClientListResponse]) + + // NewListByResourceGroupPager is the fake for method AvailableServiceAliasesClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, location string, options *armnetwork.AvailableServiceAliasesClientListByResourceGroupOptions) (resp azfake.PagerResponder[armnetwork.AvailableServiceAliasesClientListByResourceGroupResponse]) +} + +// NewAvailableServiceAliasesServerTransport creates a new instance of AvailableServiceAliasesServerTransport with the provided implementation. +// The returned AvailableServiceAliasesServerTransport instance is connected to an instance of armnetwork.AvailableServiceAliasesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewAvailableServiceAliasesServerTransport(srv *AvailableServiceAliasesServer) *AvailableServiceAliasesServerTransport { + return &AvailableServiceAliasesServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.AvailableServiceAliasesClientListResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armnetwork.AvailableServiceAliasesClientListByResourceGroupResponse]](), + } +} + +// AvailableServiceAliasesServerTransport connects instances of armnetwork.AvailableServiceAliasesClient to instances of AvailableServiceAliasesServer. +// Don't use this type directly, use NewAvailableServiceAliasesServerTransport instead. +type AvailableServiceAliasesServerTransport struct { + srv *AvailableServiceAliasesServer + newListPager *tracker[azfake.PagerResponder[armnetwork.AvailableServiceAliasesClientListResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armnetwork.AvailableServiceAliasesClientListByResourceGroupResponse]] +} + +// Do implements the policy.Transporter interface for AvailableServiceAliasesServerTransport. 
+func (a *AvailableServiceAliasesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "AvailableServiceAliasesClient.NewListPager": + resp, err = a.dispatchNewListPager(req) + case "AvailableServiceAliasesClient.NewListByResourceGroupPager": + resp, err = a.dispatchNewListByResourceGroupPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (a *AvailableServiceAliasesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if a.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := a.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/availableServiceAliases` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + resp := a.srv.NewListPager(locationParam, nil) + newListPager = &resp + a.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.AvailableServiceAliasesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + a.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + a.newListPager.remove(req) + } + return resp, nil +} + +func (a *AvailableServiceAliasesServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if a.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := a.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/availableServiceAliases` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + resp := a.srv.NewListByResourceGroupPager(resourceGroupNameParam, locationParam, nil) + newListByResourceGroupPager = &resp + a.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armnetwork.AvailableServiceAliasesClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + a.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + a.newListByResourceGroupPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/azurefirewallfqdntags_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/azurefirewallfqdntags_server.go new file mode 100644 index 00000000000..25c3c6d7564 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/azurefirewallfqdntags_server.go @@ -0,0 +1,103 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "regexp" +) + +// AzureFirewallFqdnTagsServer is a fake server for instances of the armnetwork.AzureFirewallFqdnTagsClient type. 
+type AzureFirewallFqdnTagsServer struct { + // NewListAllPager is the fake for method AzureFirewallFqdnTagsClient.NewListAllPager + // HTTP status codes to indicate success: http.StatusOK + NewListAllPager func(options *armnetwork.AzureFirewallFqdnTagsClientListAllOptions) (resp azfake.PagerResponder[armnetwork.AzureFirewallFqdnTagsClientListAllResponse]) +} + +// NewAzureFirewallFqdnTagsServerTransport creates a new instance of AzureFirewallFqdnTagsServerTransport with the provided implementation. +// The returned AzureFirewallFqdnTagsServerTransport instance is connected to an instance of armnetwork.AzureFirewallFqdnTagsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewAzureFirewallFqdnTagsServerTransport(srv *AzureFirewallFqdnTagsServer) *AzureFirewallFqdnTagsServerTransport { + return &AzureFirewallFqdnTagsServerTransport{ + srv: srv, + newListAllPager: newTracker[azfake.PagerResponder[armnetwork.AzureFirewallFqdnTagsClientListAllResponse]](), + } +} + +// AzureFirewallFqdnTagsServerTransport connects instances of armnetwork.AzureFirewallFqdnTagsClient to instances of AzureFirewallFqdnTagsServer. +// Don't use this type directly, use NewAzureFirewallFqdnTagsServerTransport instead. +type AzureFirewallFqdnTagsServerTransport struct { + srv *AzureFirewallFqdnTagsServer + newListAllPager *tracker[azfake.PagerResponder[armnetwork.AzureFirewallFqdnTagsClientListAllResponse]] +} + +// Do implements the policy.Transporter interface for AzureFirewallFqdnTagsServerTransport. +func (a *AzureFirewallFqdnTagsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "AzureFirewallFqdnTagsClient.NewListAllPager": + resp, err = a.dispatchNewListAllPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (a *AzureFirewallFqdnTagsServerTransport) dispatchNewListAllPager(req *http.Request) (*http.Response, error) { + if a.srv.NewListAllPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAllPager not implemented")} + } + newListAllPager := a.newListAllPager.get(req) + if newListAllPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/azureFirewallFqdnTags` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := a.srv.NewListAllPager(nil) + newListAllPager = &resp + a.newListAllPager.add(req, newListAllPager) + server.PagerResponderInjectNextLinks(newListAllPager, req, func(page *armnetwork.AzureFirewallFqdnTagsClientListAllResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAllPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + a.newListAllPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAllPager) { + a.newListAllPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/azurefirewalls_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/azurefirewalls_server.go new file mode 100644 index 00000000000..31583dc033c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/azurefirewalls_server.go @@ -0,0 +1,461 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// AzureFirewallsServer is a fake server for instances of the armnetwork.AzureFirewallsClient type. +type AzureFirewallsServer struct { + // BeginCreateOrUpdate is the fake for method AzureFirewallsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, azureFirewallName string, parameters armnetwork.AzureFirewall, options *armnetwork.AzureFirewallsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.AzureFirewallsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method AzureFirewallsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, azureFirewallName string, options *armnetwork.AzureFirewallsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.AzureFirewallsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method AzureFirewallsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, azureFirewallName string, options *armnetwork.AzureFirewallsClientGetOptions) (resp azfake.Responder[armnetwork.AzureFirewallsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method AzureFirewallsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armnetwork.AzureFirewallsClientListOptions) (resp azfake.PagerResponder[armnetwork.AzureFirewallsClientListResponse]) + + // NewListAllPager is the fake for method AzureFirewallsClient.NewListAllPager + // HTTP status codes to indicate success: http.StatusOK + NewListAllPager func(options *armnetwork.AzureFirewallsClientListAllOptions) (resp azfake.PagerResponder[armnetwork.AzureFirewallsClientListAllResponse]) + + // BeginListLearnedPrefixes is the fake for method AzureFirewallsClient.BeginListLearnedPrefixes + // HTTP status codes to indicate success: 
http.StatusOK, http.StatusAccepted + BeginListLearnedPrefixes func(ctx context.Context, resourceGroupName string, azureFirewallName string, options *armnetwork.AzureFirewallsClientBeginListLearnedPrefixesOptions) (resp azfake.PollerResponder[armnetwork.AzureFirewallsClientListLearnedPrefixesResponse], errResp azfake.ErrorResponder) + + // BeginPacketCapture is the fake for method AzureFirewallsClient.BeginPacketCapture + // HTTP status codes to indicate success: http.StatusAccepted + BeginPacketCapture func(ctx context.Context, resourceGroupName string, azureFirewallName string, parameters armnetwork.FirewallPacketCaptureParameters, options *armnetwork.AzureFirewallsClientBeginPacketCaptureOptions) (resp azfake.PollerResponder[armnetwork.AzureFirewallsClientPacketCaptureResponse], errResp azfake.ErrorResponder) + + // BeginUpdateTags is the fake for method AzureFirewallsClient.BeginUpdateTags + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginUpdateTags func(ctx context.Context, resourceGroupName string, azureFirewallName string, parameters armnetwork.TagsObject, options *armnetwork.AzureFirewallsClientBeginUpdateTagsOptions) (resp azfake.PollerResponder[armnetwork.AzureFirewallsClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewAzureFirewallsServerTransport creates a new instance of AzureFirewallsServerTransport with the provided implementation. +// The returned AzureFirewallsServerTransport instance is connected to an instance of armnetwork.AzureFirewallsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewAzureFirewallsServerTransport(srv *AzureFirewallsServer) *AzureFirewallsServerTransport { + return &AzureFirewallsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.AzureFirewallsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.AzureFirewallsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.AzureFirewallsClientListResponse]](), + newListAllPager: newTracker[azfake.PagerResponder[armnetwork.AzureFirewallsClientListAllResponse]](), + beginListLearnedPrefixes: newTracker[azfake.PollerResponder[armnetwork.AzureFirewallsClientListLearnedPrefixesResponse]](), + beginPacketCapture: newTracker[azfake.PollerResponder[armnetwork.AzureFirewallsClientPacketCaptureResponse]](), + beginUpdateTags: newTracker[azfake.PollerResponder[armnetwork.AzureFirewallsClientUpdateTagsResponse]](), + } +} + +// AzureFirewallsServerTransport connects instances of armnetwork.AzureFirewallsClient to instances of AzureFirewallsServer. +// Don't use this type directly, use NewAzureFirewallsServerTransport instead. 
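+//
+// Long-running operations on AzureFirewallsServer are faked with azfake.PollerResponder, for
+// example a BeginDelete that reports one intermediate poll before succeeding (a sketch):
+//
+//	BeginDelete: func(ctx context.Context, resourceGroupName, azureFirewallName string,
+//		options *armnetwork.AzureFirewallsClientBeginDeleteOptions) (
+//		resp azfake.PollerResponder[armnetwork.AzureFirewallsClientDeleteResponse], errResp azfake.ErrorResponder) {
+//		resp.AddNonTerminalResponse(http.StatusAccepted, nil)
+//		resp.SetTerminalResponse(http.StatusOK, armnetwork.AzureFirewallsClientDeleteResponse{}, nil)
+//		return
+//	},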
+type AzureFirewallsServerTransport struct { + srv *AzureFirewallsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.AzureFirewallsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.AzureFirewallsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.AzureFirewallsClientListResponse]] + newListAllPager *tracker[azfake.PagerResponder[armnetwork.AzureFirewallsClientListAllResponse]] + beginListLearnedPrefixes *tracker[azfake.PollerResponder[armnetwork.AzureFirewallsClientListLearnedPrefixesResponse]] + beginPacketCapture *tracker[azfake.PollerResponder[armnetwork.AzureFirewallsClientPacketCaptureResponse]] + beginUpdateTags *tracker[azfake.PollerResponder[armnetwork.AzureFirewallsClientUpdateTagsResponse]] +} + +// Do implements the policy.Transporter interface for AzureFirewallsServerTransport. +func (a *AzureFirewallsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "AzureFirewallsClient.BeginCreateOrUpdate": + resp, err = a.dispatchBeginCreateOrUpdate(req) + case "AzureFirewallsClient.BeginDelete": + resp, err = a.dispatchBeginDelete(req) + case "AzureFirewallsClient.Get": + resp, err = a.dispatchGet(req) + case "AzureFirewallsClient.NewListPager": + resp, err = a.dispatchNewListPager(req) + case "AzureFirewallsClient.NewListAllPager": + resp, err = a.dispatchNewListAllPager(req) + case "AzureFirewallsClient.BeginListLearnedPrefixes": + resp, err = a.dispatchBeginListLearnedPrefixes(req) + case "AzureFirewallsClient.BeginPacketCapture": + resp, err = a.dispatchBeginPacketCapture(req) + case "AzureFirewallsClient.BeginUpdateTags": + resp, err = a.dispatchBeginUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (a *AzureFirewallsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if a.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := a.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/azureFirewalls/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.AzureFirewall](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + azureFirewallNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("azureFirewallName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, azureFirewallNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + a.beginCreateOrUpdate.add(req, 
beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + a.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + a.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (a *AzureFirewallsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if a.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := a.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/azureFirewalls/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + azureFirewallNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("azureFirewallName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.BeginDelete(req.Context(), resourceGroupNameParam, azureFirewallNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + a.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + a.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + a.beginDelete.remove(req) + } + + return resp, nil +} + +func (a *AzureFirewallsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if a.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/azureFirewalls/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + azureFirewallNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("azureFirewallName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.Get(req.Context(), resourceGroupNameParam, azureFirewallNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).AzureFirewall, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (a *AzureFirewallsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if a.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := a.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/azureFirewalls` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := a.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + a.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.AzureFirewallsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + a.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + a.newListPager.remove(req) + } + return resp, nil +} + +func (a *AzureFirewallsServerTransport) dispatchNewListAllPager(req *http.Request) (*http.Response, error) { + if a.srv.NewListAllPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAllPager not implemented")} + } + newListAllPager := a.newListAllPager.get(req) + if newListAllPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/azureFirewalls` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := a.srv.NewListAllPager(nil) + newListAllPager = &resp + a.newListAllPager.add(req, newListAllPager) + server.PagerResponderInjectNextLinks(newListAllPager, req, func(page *armnetwork.AzureFirewallsClientListAllResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAllPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + a.newListAllPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAllPager) { + a.newListAllPager.remove(req) + } + return resp, nil +} + +func (a *AzureFirewallsServerTransport) dispatchBeginListLearnedPrefixes(req *http.Request) (*http.Response, error) { + if a.srv.BeginListLearnedPrefixes == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginListLearnedPrefixes not implemented")} + } + beginListLearnedPrefixes := a.beginListLearnedPrefixes.get(req) + if beginListLearnedPrefixes == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/azureFirewalls/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/learnedIPPrefixes` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + azureFirewallNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("azureFirewallName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.BeginListLearnedPrefixes(req.Context(), resourceGroupNameParam, azureFirewallNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginListLearnedPrefixes = &respr + a.beginListLearnedPrefixes.add(req, beginListLearnedPrefixes) + } + + resp, err := server.PollerResponderNext(beginListLearnedPrefixes, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + a.beginListLearnedPrefixes.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginListLearnedPrefixes) { + a.beginListLearnedPrefixes.remove(req) + } + + return resp, nil +} + +func (a *AzureFirewallsServerTransport) dispatchBeginPacketCapture(req *http.Request) (*http.Response, error) { + if a.srv.BeginPacketCapture == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginPacketCapture not implemented")} + } + beginPacketCapture := a.beginPacketCapture.get(req) + if beginPacketCapture == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/azureFirewalls/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/packetCapture` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.FirewallPacketCaptureParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + azureFirewallNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("azureFirewallName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.BeginPacketCapture(req.Context(), resourceGroupNameParam, azureFirewallNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginPacketCapture = &respr + a.beginPacketCapture.add(req, beginPacketCapture) + } + + resp, err := server.PollerResponderNext(beginPacketCapture, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusAccepted}, resp.StatusCode) { + a.beginPacketCapture.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginPacketCapture) { + a.beginPacketCapture.remove(req) + } + + return resp, nil +} + +func (a *AzureFirewallsServerTransport) dispatchBeginUpdateTags(req *http.Request) (*http.Response, error) { + if a.srv.BeginUpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdateTags not implemented")} + } + beginUpdateTags := a.beginUpdateTags.get(req) + if beginUpdateTags == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/azureFirewalls/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + azureFirewallNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("azureFirewallName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.BeginUpdateTags(req.Context(), resourceGroupNameParam, azureFirewallNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdateTags = &respr + a.beginUpdateTags.add(req, beginUpdateTags) + } + + resp, err := server.PollerResponderNext(beginUpdateTags, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + a.beginUpdateTags.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdateTags) { + a.beginUpdateTags.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/bastionhosts_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/bastionhosts_server.go new file mode 100644 index 00000000000..d3bb80beea6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/bastionhosts_server.go @@ -0,0 +1,353 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// BastionHostsServer is a fake server for instances of the armnetwork.BastionHostsClient type. 
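For synchronous operations such as BastionHostsClient.Get (declared on the fake server just below), the fake uses a plain azfake.Responder that holds a single response. A short sketch follows, with placeholder names, assuming Responder.SetResponse takes the HTTP status, the response payload, and an options value.

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func main() {
	srv := fake.BastionHostsServer{
		// Get is synchronous: the Responder carries exactly one response.
		Get: func(ctx context.Context, resourceGroupName, bastionHostName string, options *armnetwork.BastionHostsClientGetOptions) (resp azfake.Responder[armnetwork.BastionHostsClientGetResponse], errResp azfake.ErrorResponder) {
			resp.SetResponse(http.StatusOK, armnetwork.BastionHostsClientGetResponse{
				BastionHost: armnetwork.BastionHost{Name: to.Ptr(bastionHostName)},
			}, nil)
			return
		},
	}

	client, err := armnetwork.NewBastionHostsClient("subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transporter: fake.NewBastionHostsServerTransport(&srv)},
	})
	if err != nil {
		log.Fatal(err)
	}

	got, err := client.Get(context.Background(), "test-rg", "test-bastion", nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("got bastion host %s", *got.Name)
}
```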
+type BastionHostsServer struct { + // BeginCreateOrUpdate is the fake for method BastionHostsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, bastionHostName string, parameters armnetwork.BastionHost, options *armnetwork.BastionHostsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.BastionHostsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method BastionHostsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, bastionHostName string, options *armnetwork.BastionHostsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.BastionHostsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method BastionHostsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, bastionHostName string, options *armnetwork.BastionHostsClientGetOptions) (resp azfake.Responder[armnetwork.BastionHostsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method BastionHostsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armnetwork.BastionHostsClientListOptions) (resp azfake.PagerResponder[armnetwork.BastionHostsClientListResponse]) + + // NewListByResourceGroupPager is the fake for method BastionHostsClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armnetwork.BastionHostsClientListByResourceGroupOptions) (resp azfake.PagerResponder[armnetwork.BastionHostsClientListByResourceGroupResponse]) + + // BeginUpdateTags is the fake for method BastionHostsClient.BeginUpdateTags + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginUpdateTags func(ctx context.Context, resourceGroupName string, bastionHostName string, parameters armnetwork.TagsObject, options *armnetwork.BastionHostsClientBeginUpdateTagsOptions) (resp azfake.PollerResponder[armnetwork.BastionHostsClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewBastionHostsServerTransport creates a new instance of BastionHostsServerTransport with the provided implementation. +// The returned BastionHostsServerTransport instance is connected to an instance of armnetwork.BastionHostsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewBastionHostsServerTransport(srv *BastionHostsServer) *BastionHostsServerTransport { + return &BastionHostsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.BastionHostsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.BastionHostsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.BastionHostsClientListResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armnetwork.BastionHostsClientListByResourceGroupResponse]](), + beginUpdateTags: newTracker[azfake.PollerResponder[armnetwork.BastionHostsClientUpdateTagsResponse]](), + } +} + +// BastionHostsServerTransport connects instances of armnetwork.BastionHostsClient to instances of BastionHostsServer. 
+// Don't use this type directly, use NewBastionHostsServerTransport instead. +type BastionHostsServerTransport struct { + srv *BastionHostsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.BastionHostsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.BastionHostsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.BastionHostsClientListResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armnetwork.BastionHostsClientListByResourceGroupResponse]] + beginUpdateTags *tracker[azfake.PollerResponder[armnetwork.BastionHostsClientUpdateTagsResponse]] +} + +// Do implements the policy.Transporter interface for BastionHostsServerTransport. +func (b *BastionHostsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "BastionHostsClient.BeginCreateOrUpdate": + resp, err = b.dispatchBeginCreateOrUpdate(req) + case "BastionHostsClient.BeginDelete": + resp, err = b.dispatchBeginDelete(req) + case "BastionHostsClient.Get": + resp, err = b.dispatchGet(req) + case "BastionHostsClient.NewListPager": + resp, err = b.dispatchNewListPager(req) + case "BastionHostsClient.NewListByResourceGroupPager": + resp, err = b.dispatchNewListByResourceGroupPager(req) + case "BastionHostsClient.BeginUpdateTags": + resp, err = b.dispatchBeginUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (b *BastionHostsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if b.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := b.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/bastionHosts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.BastionHost](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + bastionHostNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("bastionHostName")]) + if err != nil { + return nil, err + } + respr, errRespr := b.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, bastionHostNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + b.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + b.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + b.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (b *BastionHostsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if b.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := b.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/bastionHosts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + bastionHostNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("bastionHostName")]) + if err != nil { + return nil, err + } + respr, errRespr := b.srv.BeginDelete(req.Context(), resourceGroupNameParam, bastionHostNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + b.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + b.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + b.beginDelete.remove(req) + } + + return resp, nil +} + +func (b *BastionHostsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if b.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/bastionHosts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + bastionHostNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("bastionHostName")]) + if err != nil { + return nil, err + } + respr, errRespr := b.srv.Get(req.Context(), resourceGroupNameParam, bastionHostNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).BastionHost, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (b *BastionHostsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if b.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := b.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/bastionHosts` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := b.srv.NewListPager(nil) + newListPager = &resp + b.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.BastionHostsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + b.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + b.newListPager.remove(req) + } + return resp, nil +} + +func (b *BastionHostsServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if b.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := b.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/bastionHosts` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := b.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + b.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armnetwork.BastionHostsClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + b.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + b.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (b *BastionHostsServerTransport) dispatchBeginUpdateTags(req *http.Request) (*http.Response, error) { + if b.srv.BeginUpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdateTags not implemented")} + } + beginUpdateTags := b.beginUpdateTags.get(req) + if beginUpdateTags == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/bastionHosts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + bastionHostNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("bastionHostName")]) + if err != nil { + return nil, err + } + respr, errRespr := b.srv.BeginUpdateTags(req.Context(), resourceGroupNameParam, bastionHostNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdateTags = &respr + b.beginUpdateTags.add(req, beginUpdateTags) + } + + resp, err := server.PollerResponderNext(beginUpdateTags, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + b.beginUpdateTags.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdateTags) { + b.beginUpdateTags.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/bgpservicecommunities_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/bgpservicecommunities_server.go new file mode 100644 index 00000000000..a92b6618346 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/bgpservicecommunities_server.go @@ -0,0 +1,103 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "regexp" +) + +// BgpServiceCommunitiesServer is a fake server for instances of the armnetwork.BgpServiceCommunitiesClient type. 
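BgpServiceCommunitiesServer (declared below) exposes only a pager fake. The sketch that follows queues two pages and iterates them through the real client; the community names are invented, and it assumes PagerResponder.AddPage(status, page, options) from azcore/fake. The generated transport injects the nextLink between pages via PagerResponderInjectNextLinks, so the client-side loop behaves as it would against the live service.

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func main() {
	srv := fake.BgpServiceCommunitiesServer{
		NewListPager: func(options *armnetwork.BgpServiceCommunitiesClientListOptions) (resp azfake.PagerResponder[armnetwork.BgpServiceCommunitiesClientListResponse]) {
			// Two pages; the fake transport supplies the nextLink between them.
			resp.AddPage(http.StatusOK, armnetwork.BgpServiceCommunitiesClientListResponse{
				BgpServiceCommunityListResult: armnetwork.BgpServiceCommunityListResult{
					Value: []*armnetwork.BgpServiceCommunity{{Name: to.Ptr("AzureActiveDirectory")}},
				},
			}, nil)
			resp.AddPage(http.StatusOK, armnetwork.BgpServiceCommunitiesClientListResponse{
				BgpServiceCommunityListResult: armnetwork.BgpServiceCommunityListResult{
					Value: []*armnetwork.BgpServiceCommunity{{Name: to.Ptr("AzureKeyVault")}},
				},
			}, nil)
			return
		},
	}

	client, err := armnetwork.NewBgpServiceCommunitiesClient("subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transporter: fake.NewBgpServiceCommunitiesServerTransport(&srv)},
	})
	if err != nil {
		log.Fatal(err)
	}

	pager := client.NewListPager(nil)
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		for _, community := range page.Value {
			log.Println(*community.Name)
		}
	}
}
```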
+type BgpServiceCommunitiesServer struct { + // NewListPager is the fake for method BgpServiceCommunitiesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armnetwork.BgpServiceCommunitiesClientListOptions) (resp azfake.PagerResponder[armnetwork.BgpServiceCommunitiesClientListResponse]) +} + +// NewBgpServiceCommunitiesServerTransport creates a new instance of BgpServiceCommunitiesServerTransport with the provided implementation. +// The returned BgpServiceCommunitiesServerTransport instance is connected to an instance of armnetwork.BgpServiceCommunitiesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewBgpServiceCommunitiesServerTransport(srv *BgpServiceCommunitiesServer) *BgpServiceCommunitiesServerTransport { + return &BgpServiceCommunitiesServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.BgpServiceCommunitiesClientListResponse]](), + } +} + +// BgpServiceCommunitiesServerTransport connects instances of armnetwork.BgpServiceCommunitiesClient to instances of BgpServiceCommunitiesServer. +// Don't use this type directly, use NewBgpServiceCommunitiesServerTransport instead. +type BgpServiceCommunitiesServerTransport struct { + srv *BgpServiceCommunitiesServer + newListPager *tracker[azfake.PagerResponder[armnetwork.BgpServiceCommunitiesClientListResponse]] +} + +// Do implements the policy.Transporter interface for BgpServiceCommunitiesServerTransport. +func (b *BgpServiceCommunitiesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "BgpServiceCommunitiesClient.NewListPager": + resp, err = b.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (b *BgpServiceCommunitiesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if b.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := b.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/bgpServiceCommunities` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := b.srv.NewListPager(nil) + newListPager = &resp + b.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.BgpServiceCommunitiesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + b.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + b.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/configurationpolicygroups_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/configurationpolicygroups_server.go new file mode 100644 index 00000000000..3763742c65e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/configurationpolicygroups_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ConfigurationPolicyGroupsServer is a fake server for instances of the armnetwork.ConfigurationPolicyGroupsClient type. +type ConfigurationPolicyGroupsServer struct { + // BeginCreateOrUpdate is the fake for method ConfigurationPolicyGroupsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, vpnServerConfigurationName string, configurationPolicyGroupName string, vpnServerConfigurationPolicyGroupParameters armnetwork.VPNServerConfigurationPolicyGroup, options *armnetwork.ConfigurationPolicyGroupsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.ConfigurationPolicyGroupsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method ConfigurationPolicyGroupsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, vpnServerConfigurationName string, configurationPolicyGroupName string, options *armnetwork.ConfigurationPolicyGroupsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.ConfigurationPolicyGroupsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method ConfigurationPolicyGroupsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, vpnServerConfigurationName string, configurationPolicyGroupName string, options *armnetwork.ConfigurationPolicyGroupsClientGetOptions) (resp azfake.Responder[armnetwork.ConfigurationPolicyGroupsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListByVPNServerConfigurationPager is the fake for method ConfigurationPolicyGroupsClient.NewListByVPNServerConfigurationPager + // HTTP status codes to indicate success: http.StatusOK + NewListByVPNServerConfigurationPager func(resourceGroupName string, vpnServerConfigurationName string, options *armnetwork.ConfigurationPolicyGroupsClientListByVPNServerConfigurationOptions) (resp 
azfake.PagerResponder[armnetwork.ConfigurationPolicyGroupsClientListByVPNServerConfigurationResponse]) +} + +// NewConfigurationPolicyGroupsServerTransport creates a new instance of ConfigurationPolicyGroupsServerTransport with the provided implementation. +// The returned ConfigurationPolicyGroupsServerTransport instance is connected to an instance of armnetwork.ConfigurationPolicyGroupsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewConfigurationPolicyGroupsServerTransport(srv *ConfigurationPolicyGroupsServer) *ConfigurationPolicyGroupsServerTransport { + return &ConfigurationPolicyGroupsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.ConfigurationPolicyGroupsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.ConfigurationPolicyGroupsClientDeleteResponse]](), + newListByVPNServerConfigurationPager: newTracker[azfake.PagerResponder[armnetwork.ConfigurationPolicyGroupsClientListByVPNServerConfigurationResponse]](), + } +} + +// ConfigurationPolicyGroupsServerTransport connects instances of armnetwork.ConfigurationPolicyGroupsClient to instances of ConfigurationPolicyGroupsServer. +// Don't use this type directly, use NewConfigurationPolicyGroupsServerTransport instead. +type ConfigurationPolicyGroupsServerTransport struct { + srv *ConfigurationPolicyGroupsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.ConfigurationPolicyGroupsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.ConfigurationPolicyGroupsClientDeleteResponse]] + newListByVPNServerConfigurationPager *tracker[azfake.PagerResponder[armnetwork.ConfigurationPolicyGroupsClientListByVPNServerConfigurationResponse]] +} + +// Do implements the policy.Transporter interface for ConfigurationPolicyGroupsServerTransport. 
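Each dispatcher in these transports funnels failures through server.GetError, which is fed by the azfake.ErrorResponder returned from the user's fake. The sketch below simulates a 404 from ConfigurationPolicyGroupsClient.Get, assuming ErrorResponder.SetResponseError(status, errorCode) and that the client surfaces the failure as *azcore.ResponseError; all resource names are placeholders.

```go
package main

import (
	"context"
	"errors"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func main() {
	srv := fake.ConfigurationPolicyGroupsServer{
		Get: func(ctx context.Context, resourceGroupName, vpnServerConfigurationName, configurationPolicyGroupName string, options *armnetwork.ConfigurationPolicyGroupsClientGetOptions) (resp azfake.Responder[armnetwork.ConfigurationPolicyGroupsClientGetResponse], errResp azfake.ErrorResponder) {
			// Populate the error responder instead of the responder to simulate a service failure.
			errResp.SetResponseError(http.StatusNotFound, "ResourceNotFound")
			return
		},
	}

	client, err := armnetwork.NewConfigurationPolicyGroupsClient("subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transporter: fake.NewConfigurationPolicyGroupsServerTransport(&srv)},
	})
	if err != nil {
		log.Fatal(err)
	}

	_, err = client.Get(context.Background(), "test-rg", "test-vpn-config", "missing-group", nil)
	var respErr *azcore.ResponseError
	if errors.As(err, &respErr) {
		log.Printf("got expected error: %d %s", respErr.StatusCode, respErr.ErrorCode)
	}
}
```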
+func (c *ConfigurationPolicyGroupsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ConfigurationPolicyGroupsClient.BeginCreateOrUpdate": + resp, err = c.dispatchBeginCreateOrUpdate(req) + case "ConfigurationPolicyGroupsClient.BeginDelete": + resp, err = c.dispatchBeginDelete(req) + case "ConfigurationPolicyGroupsClient.Get": + resp, err = c.dispatchGet(req) + case "ConfigurationPolicyGroupsClient.NewListByVPNServerConfigurationPager": + resp, err = c.dispatchNewListByVPNServerConfigurationPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (c *ConfigurationPolicyGroupsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if c.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := c.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnServerConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/configurationPolicyGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VPNServerConfigurationPolicyGroup](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vpnServerConfigurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vpnServerConfigurationName")]) + if err != nil { + return nil, err + } + configurationPolicyGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("configurationPolicyGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, vpnServerConfigurationNameParam, configurationPolicyGroupNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + c.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + c.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + c.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (c *ConfigurationPolicyGroupsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if c.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := c.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnServerConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/configurationPolicyGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vpnServerConfigurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vpnServerConfigurationName")]) + if err != nil { + return nil, err + } + configurationPolicyGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("configurationPolicyGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.BeginDelete(req.Context(), resourceGroupNameParam, vpnServerConfigurationNameParam, configurationPolicyGroupNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + c.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + c.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + c.beginDelete.remove(req) + } + + return resp, nil +} + +func (c *ConfigurationPolicyGroupsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if c.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnServerConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/configurationPolicyGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vpnServerConfigurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vpnServerConfigurationName")]) + if err != nil { + return nil, err + } + configurationPolicyGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("configurationPolicyGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.Get(req.Context(), resourceGroupNameParam, vpnServerConfigurationNameParam, configurationPolicyGroupNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VPNServerConfigurationPolicyGroup, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *ConfigurationPolicyGroupsServerTransport) dispatchNewListByVPNServerConfigurationPager(req *http.Request) (*http.Response, error) { + if c.srv.NewListByVPNServerConfigurationPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByVPNServerConfigurationPager not implemented")} + } + newListByVPNServerConfigurationPager := c.newListByVPNServerConfigurationPager.get(req) + if newListByVPNServerConfigurationPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnServerConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/configurationPolicyGroups` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vpnServerConfigurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vpnServerConfigurationName")]) + if err != nil { + return nil, err + } + resp := c.srv.NewListByVPNServerConfigurationPager(resourceGroupNameParam, vpnServerConfigurationNameParam, nil) + newListByVPNServerConfigurationPager = &resp + c.newListByVPNServerConfigurationPager.add(req, newListByVPNServerConfigurationPager) + server.PagerResponderInjectNextLinks(newListByVPNServerConfigurationPager, req, func(page *armnetwork.ConfigurationPolicyGroupsClientListByVPNServerConfigurationResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByVPNServerConfigurationPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + c.newListByVPNServerConfigurationPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByVPNServerConfigurationPager) { + c.newListByVPNServerConfigurationPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/connectionmonitors_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/connectionmonitors_server.go new file mode 100644 index 00000000000..4ee3ea8d23f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/connectionmonitors_server.go @@ -0,0 +1,495 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
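Tests that touch several clients do not have to wire one transport per client: the generated fake package also ships a ServerFactory and NewServerFactoryTransport (defined elsewhere in this package, not shown in this diff) that route every armnetwork client created from a ClientFactory to the matching fake server. A hedged sketch, assuming the usual generated layout of one ServerFactory field per server type:

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func main() {
	// One ServerFactory carries the fakes for every client in the module.
	factory := fake.ServerFactory{
		AzureFirewallsServer: fake.AzureFirewallsServer{
			Get: func(ctx context.Context, resourceGroupName, azureFirewallName string, options *armnetwork.AzureFirewallsClientGetOptions) (resp azfake.Responder[armnetwork.AzureFirewallsClientGetResponse], errResp azfake.ErrorResponder) {
				resp.SetResponse(http.StatusOK, armnetwork.AzureFirewallsClientGetResponse{
					AzureFirewall: armnetwork.AzureFirewall{Name: to.Ptr(azureFirewallName)},
				}, nil)
				return
			},
		},
	}

	clientFactory, err := armnetwork.NewClientFactory("subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transporter: fake.NewServerFactoryTransport(&factory)},
	})
	if err != nil {
		log.Fatal(err)
	}

	fw, err := clientFactory.NewAzureFirewallsClient().Get(context.Background(), "test-rg", "test-fw", nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("firewall: %s", *fw.Name)
}
```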
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ConnectionMonitorsServer is a fake server for instances of the armnetwork.ConnectionMonitorsClient type. +type ConnectionMonitorsServer struct { + // BeginCreateOrUpdate is the fake for method ConnectionMonitorsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, networkWatcherName string, connectionMonitorName string, parameters armnetwork.ConnectionMonitor, options *armnetwork.ConnectionMonitorsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.ConnectionMonitorsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method ConnectionMonitorsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, networkWatcherName string, connectionMonitorName string, options *armnetwork.ConnectionMonitorsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.ConnectionMonitorsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method ConnectionMonitorsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, networkWatcherName string, connectionMonitorName string, options *armnetwork.ConnectionMonitorsClientGetOptions) (resp azfake.Responder[armnetwork.ConnectionMonitorsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method ConnectionMonitorsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, networkWatcherName string, options *armnetwork.ConnectionMonitorsClientListOptions) (resp azfake.PagerResponder[armnetwork.ConnectionMonitorsClientListResponse]) + + // BeginQuery is the fake for method ConnectionMonitorsClient.BeginQuery + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginQuery func(ctx context.Context, resourceGroupName string, networkWatcherName string, connectionMonitorName string, options *armnetwork.ConnectionMonitorsClientBeginQueryOptions) (resp azfake.PollerResponder[armnetwork.ConnectionMonitorsClientQueryResponse], errResp azfake.ErrorResponder) + + // BeginStart is the fake for method ConnectionMonitorsClient.BeginStart + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginStart func(ctx context.Context, resourceGroupName string, networkWatcherName string, connectionMonitorName string, options *armnetwork.ConnectionMonitorsClientBeginStartOptions) (resp azfake.PollerResponder[armnetwork.ConnectionMonitorsClientStartResponse], errResp azfake.ErrorResponder) + + // BeginStop is the fake for method ConnectionMonitorsClient.BeginStop + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginStop func(ctx context.Context, resourceGroupName string, networkWatcherName string, connectionMonitorName string, options *armnetwork.ConnectionMonitorsClientBeginStopOptions) (resp 
azfake.PollerResponder[armnetwork.ConnectionMonitorsClientStopResponse], errResp azfake.ErrorResponder) + + // UpdateTags is the fake for method ConnectionMonitorsClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, networkWatcherName string, connectionMonitorName string, parameters armnetwork.TagsObject, options *armnetwork.ConnectionMonitorsClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.ConnectionMonitorsClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewConnectionMonitorsServerTransport creates a new instance of ConnectionMonitorsServerTransport with the provided implementation. +// The returned ConnectionMonitorsServerTransport instance is connected to an instance of armnetwork.ConnectionMonitorsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewConnectionMonitorsServerTransport(srv *ConnectionMonitorsServer) *ConnectionMonitorsServerTransport { + return &ConnectionMonitorsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.ConnectionMonitorsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.ConnectionMonitorsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.ConnectionMonitorsClientListResponse]](), + beginQuery: newTracker[azfake.PollerResponder[armnetwork.ConnectionMonitorsClientQueryResponse]](), + beginStart: newTracker[azfake.PollerResponder[armnetwork.ConnectionMonitorsClientStartResponse]](), + beginStop: newTracker[azfake.PollerResponder[armnetwork.ConnectionMonitorsClientStopResponse]](), + } +} + +// ConnectionMonitorsServerTransport connects instances of armnetwork.ConnectionMonitorsClient to instances of ConnectionMonitorsServer. +// Don't use this type directly, use NewConnectionMonitorsServerTransport instead. +type ConnectionMonitorsServerTransport struct { + srv *ConnectionMonitorsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.ConnectionMonitorsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.ConnectionMonitorsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.ConnectionMonitorsClientListResponse]] + beginQuery *tracker[azfake.PollerResponder[armnetwork.ConnectionMonitorsClientQueryResponse]] + beginStart *tracker[azfake.PollerResponder[armnetwork.ConnectionMonitorsClientStartResponse]] + beginStop *tracker[azfake.PollerResponder[armnetwork.ConnectionMonitorsClientStopResponse]] +} + +// Do implements the policy.Transporter interface for ConnectionMonitorsServerTransport. 
+func (c *ConnectionMonitorsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ConnectionMonitorsClient.BeginCreateOrUpdate": + resp, err = c.dispatchBeginCreateOrUpdate(req) + case "ConnectionMonitorsClient.BeginDelete": + resp, err = c.dispatchBeginDelete(req) + case "ConnectionMonitorsClient.Get": + resp, err = c.dispatchGet(req) + case "ConnectionMonitorsClient.NewListPager": + resp, err = c.dispatchNewListPager(req) + case "ConnectionMonitorsClient.BeginQuery": + resp, err = c.dispatchBeginQuery(req) + case "ConnectionMonitorsClient.BeginStart": + resp, err = c.dispatchBeginStart(req) + case "ConnectionMonitorsClient.BeginStop": + resp, err = c.dispatchBeginStop(req) + case "ConnectionMonitorsClient.UpdateTags": + resp, err = c.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (c *ConnectionMonitorsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if c.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := c.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/connectionMonitors/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + body, err := server.UnmarshalRequestAsJSON[armnetwork.ConnectionMonitor](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + connectionMonitorNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionMonitorName")]) + if err != nil { + return nil, err + } + migrateUnescaped, err := url.QueryUnescape(qp.Get("migrate")) + if err != nil { + return nil, err + } + migrateParam := getOptional(migrateUnescaped) + var options *armnetwork.ConnectionMonitorsClientBeginCreateOrUpdateOptions + if migrateParam != nil { + options = &armnetwork.ConnectionMonitorsClientBeginCreateOrUpdateOptions{ + Migrate: migrateParam, + } + } + respr, errRespr := c.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, networkWatcherNameParam, connectionMonitorNameParam, body, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + c.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + c.beginCreateOrUpdate.remove(req) + return nil, 
&nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + c.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (c *ConnectionMonitorsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if c.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := c.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/connectionMonitors/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + connectionMonitorNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionMonitorName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.BeginDelete(req.Context(), resourceGroupNameParam, networkWatcherNameParam, connectionMonitorNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + c.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + c.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + c.beginDelete.remove(req) + } + + return resp, nil +} + +func (c *ConnectionMonitorsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if c.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/connectionMonitors/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + connectionMonitorNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionMonitorName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.Get(req.Context(), resourceGroupNameParam, networkWatcherNameParam, connectionMonitorNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ConnectionMonitorResult, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *ConnectionMonitorsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if c.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := c.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/connectionMonitors` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + resp := c.srv.NewListPager(resourceGroupNameParam, networkWatcherNameParam, nil) + newListPager = &resp + c.newListPager.add(req, newListPager) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + c.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + c.newListPager.remove(req) + } + return resp, nil +} + +func (c *ConnectionMonitorsServerTransport) dispatchBeginQuery(req *http.Request) (*http.Response, error) { + if c.srv.BeginQuery == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginQuery not implemented")} + } + beginQuery := c.beginQuery.get(req) + if beginQuery == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/connectionMonitors/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/query` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + connectionMonitorNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionMonitorName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.BeginQuery(req.Context(), resourceGroupNameParam, networkWatcherNameParam, connectionMonitorNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginQuery = &respr + c.beginQuery.add(req, beginQuery) + } + + resp, err := server.PollerResponderNext(beginQuery, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + c.beginQuery.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginQuery) { + c.beginQuery.remove(req) + } + + return resp, nil +} + +func (c *ConnectionMonitorsServerTransport) dispatchBeginStart(req *http.Request) (*http.Response, error) { + if c.srv.BeginStart == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginStart not implemented")} + } + beginStart := c.beginStart.get(req) + if beginStart == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/connectionMonitors/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/start` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + connectionMonitorNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionMonitorName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.BeginStart(req.Context(), resourceGroupNameParam, networkWatcherNameParam, connectionMonitorNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginStart = &respr + c.beginStart.add(req, beginStart) + } + + resp, err := server.PollerResponderNext(beginStart, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + c.beginStart.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginStart) { + c.beginStart.remove(req) + } + + return resp, nil +} + +func (c *ConnectionMonitorsServerTransport) dispatchBeginStop(req *http.Request) (*http.Response, error) { + if c.srv.BeginStop == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginStop not implemented")} + } + beginStop := c.beginStop.get(req) + if beginStop == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/connectionMonitors/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/stop` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + connectionMonitorNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionMonitorName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.BeginStop(req.Context(), resourceGroupNameParam, networkWatcherNameParam, connectionMonitorNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginStop = &respr + c.beginStop.add(req, beginStop) + } + + resp, err := server.PollerResponderNext(beginStop, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + c.beginStop.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginStop) { + c.beginStop.remove(req) + } + + return resp, nil +} + +func (c *ConnectionMonitorsServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if c.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/connectionMonitors/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + connectionMonitorNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionMonitorName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.UpdateTags(req.Context(), resourceGroupNameParam, networkWatcherNameParam, connectionMonitorNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ConnectionMonitorResult, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/connectivityconfigurations_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/connectivityconfigurations_server.go new file mode 100644 index 00000000000..b83ad6059a7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/connectivityconfigurations_server.go @@ -0,0 +1,302 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" + "strconv" +) + +// ConnectivityConfigurationsServer is a fake server for instances of the armnetwork.ConnectivityConfigurationsClient type. 
+type ConnectivityConfigurationsServer struct { + // CreateOrUpdate is the fake for method ConnectivityConfigurationsClient.CreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + CreateOrUpdate func(ctx context.Context, resourceGroupName string, networkManagerName string, configurationName string, connectivityConfiguration armnetwork.ConnectivityConfiguration, options *armnetwork.ConnectivityConfigurationsClientCreateOrUpdateOptions) (resp azfake.Responder[armnetwork.ConnectivityConfigurationsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method ConnectivityConfigurationsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, networkManagerName string, configurationName string, options *armnetwork.ConnectivityConfigurationsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.ConnectivityConfigurationsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method ConnectivityConfigurationsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, networkManagerName string, configurationName string, options *armnetwork.ConnectivityConfigurationsClientGetOptions) (resp azfake.Responder[armnetwork.ConnectivityConfigurationsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method ConnectivityConfigurationsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, networkManagerName string, options *armnetwork.ConnectivityConfigurationsClientListOptions) (resp azfake.PagerResponder[armnetwork.ConnectivityConfigurationsClientListResponse]) +} + +// NewConnectivityConfigurationsServerTransport creates a new instance of ConnectivityConfigurationsServerTransport with the provided implementation. +// The returned ConnectivityConfigurationsServerTransport instance is connected to an instance of armnetwork.ConnectivityConfigurationsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewConnectivityConfigurationsServerTransport(srv *ConnectivityConfigurationsServer) *ConnectivityConfigurationsServerTransport { + return &ConnectivityConfigurationsServerTransport{ + srv: srv, + beginDelete: newTracker[azfake.PollerResponder[armnetwork.ConnectivityConfigurationsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.ConnectivityConfigurationsClientListResponse]](), + } +} + +// ConnectivityConfigurationsServerTransport connects instances of armnetwork.ConnectivityConfigurationsClient to instances of ConnectivityConfigurationsServer. +// Don't use this type directly, use NewConnectivityConfigurationsServerTransport instead. +type ConnectivityConfigurationsServerTransport struct { + srv *ConnectivityConfigurationsServer + beginDelete *tracker[azfake.PollerResponder[armnetwork.ConnectivityConfigurationsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.ConnectivityConfigurationsClientListResponse]] +} + +// Do implements the policy.Transporter interface for ConnectivityConfigurationsServerTransport. 
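The ErrorResponder returned alongside each fake is how a test simulates service failures; server.GetError in the dispatch methods below turns it into the error the client surfaces. A hypothetical sketch of that path, assuming azfake.ErrorResponder's SetResponseError helper and the same placeholder names as the earlier sketch:

// Sketch: a CreateOrUpdate fake that answers with an ARM-style 409 error.
package faketest

import (
	"context"
	"errors"
	"net/http"
	"testing"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func TestConnectivityConfigurationsCreateOrUpdateError(t *testing.T) {
	srv := fake.ConnectivityConfigurationsServer{
		CreateOrUpdate: func(ctx context.Context, resourceGroupName, networkManagerName, configurationName string,
			connectivityConfiguration armnetwork.ConnectivityConfiguration,
			options *armnetwork.ConnectivityConfigurationsClientCreateOrUpdateOptions) (resp azfake.Responder[armnetwork.ConnectivityConfigurationsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) {
			// No success response is set; the transport surfaces a 409 instead.
			errResp.SetResponseError(http.StatusConflict, "Conflict")
			return
		},
	}
	client, err := armnetwork.NewConnectivityConfigurationsClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transporter: fake.NewConnectivityConfigurationsServerTransport(&srv)},
	})
	if err != nil {
		t.Fatal(err)
	}
	_, err = client.CreateOrUpdate(context.Background(), "rg", "manager", "config", armnetwork.ConnectivityConfiguration{}, nil)
	var respErr *azcore.ResponseError
	if !errors.As(err, &respErr) || respErr.StatusCode != http.StatusConflict {
		t.Fatalf("expected a 409 ResponseError, got %v", err)
	}
}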
+func (c *ConnectivityConfigurationsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ConnectivityConfigurationsClient.CreateOrUpdate": + resp, err = c.dispatchCreateOrUpdate(req) + case "ConnectivityConfigurationsClient.BeginDelete": + resp, err = c.dispatchBeginDelete(req) + case "ConnectivityConfigurationsClient.Get": + resp, err = c.dispatchGet(req) + case "ConnectivityConfigurationsClient.NewListPager": + resp, err = c.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (c *ConnectivityConfigurationsServerTransport) dispatchCreateOrUpdate(req *http.Request) (*http.Response, error) { + if c.srv.CreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method CreateOrUpdate not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/connectivityConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.ConnectivityConfiguration](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + configurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("configurationName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.CreateOrUpdate(req.Context(), resourceGroupNameParam, networkManagerNameParam, configurationNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusCreated}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ConnectivityConfiguration, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *ConnectivityConfigurationsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if c.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := c.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/connectivityConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + configurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("configurationName")]) + if err != nil { + return nil, err + } + forceUnescaped, err := url.QueryUnescape(qp.Get("force")) + if err != nil { + return nil, err + } + forceParam, err := parseOptional(forceUnescaped, strconv.ParseBool) + if err != nil { + return nil, err + } + var options *armnetwork.ConnectivityConfigurationsClientBeginDeleteOptions + if forceParam != nil { + options = &armnetwork.ConnectivityConfigurationsClientBeginDeleteOptions{ + Force: forceParam, + } + } + respr, errRespr := c.srv.BeginDelete(req.Context(), resourceGroupNameParam, networkManagerNameParam, configurationNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + c.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + c.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + c.beginDelete.remove(req) + } + + return resp, nil +} + +func (c *ConnectivityConfigurationsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if c.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/connectivityConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + configurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("configurationName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.Get(req.Context(), resourceGroupNameParam, networkManagerNameParam, configurationNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ConnectivityConfiguration, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *ConnectivityConfigurationsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if c.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := c.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/connectivityConfigurations` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + topUnescaped, err := url.QueryUnescape(qp.Get("$top")) + if err != nil { + return nil, err + } + topParam, err := parseOptional(topUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + skipTokenUnescaped, err := url.QueryUnescape(qp.Get("$skipToken")) + if err != nil { + return nil, err + } + skipTokenParam := getOptional(skipTokenUnescaped) + var options 
*armnetwork.ConnectivityConfigurationsClientListOptions + if topParam != nil || skipTokenParam != nil { + options = &armnetwork.ConnectivityConfigurationsClientListOptions{ + Top: topParam, + SkipToken: skipTokenParam, + } + } + resp := c.srv.NewListPager(resourceGroupNameParam, networkManagerNameParam, options) + newListPager = &resp + c.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.ConnectivityConfigurationsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + c.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + c.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/customipprefixes_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/customipprefixes_server.go new file mode 100644 index 00000000000..be2dc5f06d3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/customipprefixes_server.go @@ -0,0 +1,352 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// CustomIPPrefixesServer is a fake server for instances of the armnetwork.CustomIPPrefixesClient type. 
+type CustomIPPrefixesServer struct { + // BeginCreateOrUpdate is the fake for method CustomIPPrefixesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, customIPPrefixName string, parameters armnetwork.CustomIPPrefix, options *armnetwork.CustomIPPrefixesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.CustomIPPrefixesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method CustomIPPrefixesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, customIPPrefixName string, options *armnetwork.CustomIPPrefixesClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.CustomIPPrefixesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method CustomIPPrefixesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, customIPPrefixName string, options *armnetwork.CustomIPPrefixesClientGetOptions) (resp azfake.Responder[armnetwork.CustomIPPrefixesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method CustomIPPrefixesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armnetwork.CustomIPPrefixesClientListOptions) (resp azfake.PagerResponder[armnetwork.CustomIPPrefixesClientListResponse]) + + // NewListAllPager is the fake for method CustomIPPrefixesClient.NewListAllPager + // HTTP status codes to indicate success: http.StatusOK + NewListAllPager func(options *armnetwork.CustomIPPrefixesClientListAllOptions) (resp azfake.PagerResponder[armnetwork.CustomIPPrefixesClientListAllResponse]) + + // UpdateTags is the fake for method CustomIPPrefixesClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, customIPPrefixName string, parameters armnetwork.TagsObject, options *armnetwork.CustomIPPrefixesClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.CustomIPPrefixesClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewCustomIPPrefixesServerTransport creates a new instance of CustomIPPrefixesServerTransport with the provided implementation. +// The returned CustomIPPrefixesServerTransport instance is connected to an instance of armnetwork.CustomIPPrefixesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewCustomIPPrefixesServerTransport(srv *CustomIPPrefixesServer) *CustomIPPrefixesServerTransport { + return &CustomIPPrefixesServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.CustomIPPrefixesClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.CustomIPPrefixesClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.CustomIPPrefixesClientListResponse]](), + newListAllPager: newTracker[azfake.PagerResponder[armnetwork.CustomIPPrefixesClientListAllResponse]](), + } +} + +// CustomIPPrefixesServerTransport connects instances of armnetwork.CustomIPPrefixesClient to instances of CustomIPPrefixesServer. +// Don't use this type directly, use NewCustomIPPrefixesServerTransport instead. 
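The NewListPager fake above returns an azfake.PagerResponder that the transport (dispatchNewListPager below) replays one page per HTTP round-trip, injecting nextLink values between pages. A short sketch of populating it, assuming the PagerResponder.AddPage helper and the same imports and client wiring as the first sketch; the empty response payloads are placeholders a real test would fill in.

// Sketch only: a CustomIPPrefixesServer whose list fake yields two pages.
var customIPPrefixesSrv = fake.CustomIPPrefixesServer{
	NewListPager: func(resourceGroupName string, options *armnetwork.CustomIPPrefixesClientListOptions) (resp azfake.PagerResponder[armnetwork.CustomIPPrefixesClientListResponse]) {
		// Each AddPage becomes one HTTP 200 response; the generated transport
		// chains them together via PagerResponderInjectNextLinks.
		resp.AddPage(http.StatusOK, armnetwork.CustomIPPrefixesClientListResponse{}, nil)
		resp.AddPage(http.StatusOK, armnetwork.CustomIPPrefixesClientListResponse{}, nil)
		return
	},
}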
+type CustomIPPrefixesServerTransport struct { + srv *CustomIPPrefixesServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.CustomIPPrefixesClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.CustomIPPrefixesClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.CustomIPPrefixesClientListResponse]] + newListAllPager *tracker[azfake.PagerResponder[armnetwork.CustomIPPrefixesClientListAllResponse]] +} + +// Do implements the policy.Transporter interface for CustomIPPrefixesServerTransport. +func (c *CustomIPPrefixesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "CustomIPPrefixesClient.BeginCreateOrUpdate": + resp, err = c.dispatchBeginCreateOrUpdate(req) + case "CustomIPPrefixesClient.BeginDelete": + resp, err = c.dispatchBeginDelete(req) + case "CustomIPPrefixesClient.Get": + resp, err = c.dispatchGet(req) + case "CustomIPPrefixesClient.NewListPager": + resp, err = c.dispatchNewListPager(req) + case "CustomIPPrefixesClient.NewListAllPager": + resp, err = c.dispatchNewListAllPager(req) + case "CustomIPPrefixesClient.UpdateTags": + resp, err = c.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (c *CustomIPPrefixesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if c.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := c.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/customIpPrefixes/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.CustomIPPrefix](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + customIPPrefixNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("customIpPrefixName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, customIPPrefixNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + c.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + c.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + c.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (c *CustomIPPrefixesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if c.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := c.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/customIpPrefixes/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + customIPPrefixNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("customIpPrefixName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.BeginDelete(req.Context(), resourceGroupNameParam, customIPPrefixNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + c.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + c.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + c.beginDelete.remove(req) + } + + return resp, nil +} + +func (c *CustomIPPrefixesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if c.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/customIpPrefixes/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + customIPPrefixNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("customIpPrefixName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.CustomIPPrefixesClientGetOptions + if expandParam != nil { + options = &armnetwork.CustomIPPrefixesClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := c.srv.Get(req.Context(), resourceGroupNameParam, customIPPrefixNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, 
&nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).CustomIPPrefix, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *CustomIPPrefixesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if c.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := c.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/customIpPrefixes` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := c.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + c.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.CustomIPPrefixesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + c.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + c.newListPager.remove(req) + } + return resp, nil +} + +func (c *CustomIPPrefixesServerTransport) dispatchNewListAllPager(req *http.Request) (*http.Response, error) { + if c.srv.NewListAllPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAllPager not implemented")} + } + newListAllPager := c.newListAllPager.get(req) + if newListAllPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/customIpPrefixes` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := c.srv.NewListAllPager(nil) + newListAllPager = &resp + c.newListAllPager.add(req, newListAllPager) + server.PagerResponderInjectNextLinks(newListAllPager, req, func(page *armnetwork.CustomIPPrefixesClientListAllResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAllPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + c.newListAllPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAllPager) { + c.newListAllPager.remove(req) + } + return resp, nil +} + +func (c *CustomIPPrefixesServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if c.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/customIpPrefixes/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + customIPPrefixNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("customIpPrefixName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.UpdateTags(req.Context(), resourceGroupNameParam, customIPPrefixNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).CustomIPPrefix, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/ddoscustompolicies_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/ddoscustompolicies_server.go new file mode 100644 index 00000000000..5a3a0feef03 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/ddoscustompolicies_server.go @@ -0,0 +1,253 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// DdosCustomPoliciesServer is a fake server for instances of the armnetwork.DdosCustomPoliciesClient type. 
+type DdosCustomPoliciesServer struct { + // BeginCreateOrUpdate is the fake for method DdosCustomPoliciesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, ddosCustomPolicyName string, parameters armnetwork.DdosCustomPolicy, options *armnetwork.DdosCustomPoliciesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.DdosCustomPoliciesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method DdosCustomPoliciesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, ddosCustomPolicyName string, options *armnetwork.DdosCustomPoliciesClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.DdosCustomPoliciesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method DdosCustomPoliciesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, ddosCustomPolicyName string, options *armnetwork.DdosCustomPoliciesClientGetOptions) (resp azfake.Responder[armnetwork.DdosCustomPoliciesClientGetResponse], errResp azfake.ErrorResponder) + + // UpdateTags is the fake for method DdosCustomPoliciesClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, ddosCustomPolicyName string, parameters armnetwork.TagsObject, options *armnetwork.DdosCustomPoliciesClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.DdosCustomPoliciesClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewDdosCustomPoliciesServerTransport creates a new instance of DdosCustomPoliciesServerTransport with the provided implementation. +// The returned DdosCustomPoliciesServerTransport instance is connected to an instance of armnetwork.DdosCustomPoliciesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewDdosCustomPoliciesServerTransport(srv *DdosCustomPoliciesServer) *DdosCustomPoliciesServerTransport { + return &DdosCustomPoliciesServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.DdosCustomPoliciesClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.DdosCustomPoliciesClientDeleteResponse]](), + } +} + +// DdosCustomPoliciesServerTransport connects instances of armnetwork.DdosCustomPoliciesClient to instances of DdosCustomPoliciesServer. +// Don't use this type directly, use NewDdosCustomPoliciesServerTransport instead. +type DdosCustomPoliciesServerTransport struct { + srv *DdosCustomPoliciesServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.DdosCustomPoliciesClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.DdosCustomPoliciesClientDeleteResponse]] +} + +// Do implements the policy.Transporter interface for DdosCustomPoliciesServerTransport. 
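For the long-running operations (the Begin* fakes), the azfake.PollerResponder returned by the fake controls how many polling round-trips dispatchBeginDelete and friends perform before the operation completes. A sketch under the assumption that PollerResponder exposes AddNonTerminalResponse and SetTerminalResponse helpers, again reusing the imports and wiring from the first sketch:

// Sketch only: a BeginDelete fake that polls once before succeeding.
var ddosCustomPoliciesSrv = fake.DdosCustomPoliciesServer{
	BeginDelete: func(ctx context.Context, resourceGroupName, ddosCustomPolicyName string,
		options *armnetwork.DdosCustomPoliciesClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.DdosCustomPoliciesClientDeleteResponse], errResp azfake.ErrorResponder) {
		// One in-progress poll (202), then the terminal 200; dispatchBeginDelete
		// accepts 200/202/204 at each step, matching the doc comment above.
		resp.AddNonTerminalResponse(http.StatusAccepted, nil)
		resp.SetTerminalResponse(http.StatusOK, armnetwork.DdosCustomPoliciesClientDeleteResponse{}, nil)
		return
	},
}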
+func (d *DdosCustomPoliciesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "DdosCustomPoliciesClient.BeginCreateOrUpdate": + resp, err = d.dispatchBeginCreateOrUpdate(req) + case "DdosCustomPoliciesClient.BeginDelete": + resp, err = d.dispatchBeginDelete(req) + case "DdosCustomPoliciesClient.Get": + resp, err = d.dispatchGet(req) + case "DdosCustomPoliciesClient.UpdateTags": + resp, err = d.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (d *DdosCustomPoliciesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if d.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := d.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ddosCustomPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.DdosCustomPolicy](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + ddosCustomPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ddosCustomPolicyName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, ddosCustomPolicyNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + d.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + d.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + d.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (d *DdosCustomPoliciesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if d.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := d.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ddosCustomPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + ddosCustomPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ddosCustomPolicyName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginDelete(req.Context(), resourceGroupNameParam, ddosCustomPolicyNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + d.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + d.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + d.beginDelete.remove(req) + } + + return resp, nil +} + +func (d *DdosCustomPoliciesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if d.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ddosCustomPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + ddosCustomPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ddosCustomPolicyName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.Get(req.Context(), resourceGroupNameParam, ddosCustomPolicyNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).DdosCustomPolicy, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (d *DdosCustomPoliciesServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if d.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ddosCustomPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + ddosCustomPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ddosCustomPolicyName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.UpdateTags(req.Context(), resourceGroupNameParam, ddosCustomPolicyNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).DdosCustomPolicy, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/ddosprotectionplans_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/ddosprotectionplans_server.go new file mode 100644 index 00000000000..e1562e643a5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/ddosprotectionplans_server.go @@ -0,0 +1,340 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// DdosProtectionPlansServer is a fake server for instances of the armnetwork.DdosProtectionPlansClient type. 
+type DdosProtectionPlansServer struct { + // BeginCreateOrUpdate is the fake for method DdosProtectionPlansClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, ddosProtectionPlanName string, parameters armnetwork.DdosProtectionPlan, options *armnetwork.DdosProtectionPlansClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.DdosProtectionPlansClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method DdosProtectionPlansClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, ddosProtectionPlanName string, options *armnetwork.DdosProtectionPlansClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.DdosProtectionPlansClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method DdosProtectionPlansClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, ddosProtectionPlanName string, options *armnetwork.DdosProtectionPlansClientGetOptions) (resp azfake.Responder[armnetwork.DdosProtectionPlansClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method DdosProtectionPlansClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armnetwork.DdosProtectionPlansClientListOptions) (resp azfake.PagerResponder[armnetwork.DdosProtectionPlansClientListResponse]) + + // NewListByResourceGroupPager is the fake for method DdosProtectionPlansClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armnetwork.DdosProtectionPlansClientListByResourceGroupOptions) (resp azfake.PagerResponder[armnetwork.DdosProtectionPlansClientListByResourceGroupResponse]) + + // UpdateTags is the fake for method DdosProtectionPlansClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, ddosProtectionPlanName string, parameters armnetwork.TagsObject, options *armnetwork.DdosProtectionPlansClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.DdosProtectionPlansClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewDdosProtectionPlansServerTransport creates a new instance of DdosProtectionPlansServerTransport with the provided implementation. +// The returned DdosProtectionPlansServerTransport instance is connected to an instance of armnetwork.DdosProtectionPlansClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
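A minimal usage sketch, separate from the vendored diff above: it assumes the generated DdosProtectionPlansServer and NewDdosProtectionPlansServerTransport shown here, fills in only the Get fake, and wires the transport into the real client through the azcore.ClientOptions.Transporter field as the preceding comment describes. The subscription, resource group, and plan names are placeholders and the test name is invented for illustration.

package armnetwork_test // illustrative only; any test package works

import (
    "context"
    "net/http"
    "testing"

    "github.com/Azure/azure-sdk-for-go/sdk/azcore"
    "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
    azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
    "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func TestDdosProtectionPlanGetWithFake(t *testing.T) {
    // Only the methods a test exercises need an implementation; the generated
    // transport returns a "not implemented" error for the rest.
    srv := fake.DdosProtectionPlansServer{
        Get: func(ctx context.Context, resourceGroupName, ddosProtectionPlanName string, options *armnetwork.DdosProtectionPlansClientGetOptions) (resp azfake.Responder[armnetwork.DdosProtectionPlansClientGetResponse], errResp azfake.ErrorResponder) {
            // Echo the requested name back with a 200, the only status the
            // generated dispatcher accepts for Get.
            resp.SetResponse(http.StatusOK, armnetwork.DdosProtectionPlansClientGetResponse{
                DdosProtectionPlan: armnetwork.DdosProtectionPlan{Name: to.Ptr(ddosProtectionPlanName)},
            }, nil)
            return
        },
    }

    // Route all client traffic to the fake via the Transporter option.
    client, err := armnetwork.NewDdosProtectionPlansClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
        ClientOptions: azcore.ClientOptions{Transporter: fake.NewDdosProtectionPlansServerTransport(&srv)},
    })
    if err != nil {
        t.Fatal(err)
    }

    got, err := client.Get(context.Background(), "fake-rg", "fake-plan", nil)
    if err != nil {
        t.Fatal(err)
    }
    if got.Name == nil || *got.Name != "fake-plan" {
        t.Fatalf("unexpected plan name %v", got.Name)
    }
}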
+func NewDdosProtectionPlansServerTransport(srv *DdosProtectionPlansServer) *DdosProtectionPlansServerTransport { + return &DdosProtectionPlansServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.DdosProtectionPlansClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.DdosProtectionPlansClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.DdosProtectionPlansClientListResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armnetwork.DdosProtectionPlansClientListByResourceGroupResponse]](), + } +} + +// DdosProtectionPlansServerTransport connects instances of armnetwork.DdosProtectionPlansClient to instances of DdosProtectionPlansServer. +// Don't use this type directly, use NewDdosProtectionPlansServerTransport instead. +type DdosProtectionPlansServerTransport struct { + srv *DdosProtectionPlansServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.DdosProtectionPlansClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.DdosProtectionPlansClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.DdosProtectionPlansClientListResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armnetwork.DdosProtectionPlansClientListByResourceGroupResponse]] +} + +// Do implements the policy.Transporter interface for DdosProtectionPlansServerTransport. +func (d *DdosProtectionPlansServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "DdosProtectionPlansClient.BeginCreateOrUpdate": + resp, err = d.dispatchBeginCreateOrUpdate(req) + case "DdosProtectionPlansClient.BeginDelete": + resp, err = d.dispatchBeginDelete(req) + case "DdosProtectionPlansClient.Get": + resp, err = d.dispatchGet(req) + case "DdosProtectionPlansClient.NewListPager": + resp, err = d.dispatchNewListPager(req) + case "DdosProtectionPlansClient.NewListByResourceGroupPager": + resp, err = d.dispatchNewListByResourceGroupPager(req) + case "DdosProtectionPlansClient.UpdateTags": + resp, err = d.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (d *DdosProtectionPlansServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if d.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := d.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ddosProtectionPlans/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.DdosProtectionPlan](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err 
!= nil { + return nil, err + } + ddosProtectionPlanNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ddosProtectionPlanName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, ddosProtectionPlanNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + d.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + d.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + d.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (d *DdosProtectionPlansServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if d.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := d.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ddosProtectionPlans/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + ddosProtectionPlanNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ddosProtectionPlanName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginDelete(req.Context(), resourceGroupNameParam, ddosProtectionPlanNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + d.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + d.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + d.beginDelete.remove(req) + } + + return resp, nil +} + +func (d *DdosProtectionPlansServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if d.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ddosProtectionPlans/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + ddosProtectionPlanNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ddosProtectionPlanName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.Get(req.Context(), resourceGroupNameParam, ddosProtectionPlanNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).DdosProtectionPlan, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (d *DdosProtectionPlansServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if d.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := d.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ddosProtectionPlans` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := d.srv.NewListPager(nil) + newListPager = &resp + d.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.DdosProtectionPlansClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + d.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + d.newListPager.remove(req) + } + return resp, nil +} + +func (d *DdosProtectionPlansServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if d.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := d.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ddosProtectionPlans` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := d.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + d.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armnetwork.DdosProtectionPlansClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + d.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + d.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (d *DdosProtectionPlansServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if d.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ddosProtectionPlans/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + ddosProtectionPlanNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ddosProtectionPlanName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.UpdateTags(req.Context(), resourceGroupNameParam, ddosProtectionPlanNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).DdosProtectionPlan, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/defaultsecurityrules_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/defaultsecurityrules_server.go new file mode 100644 index 00000000000..33df95b444d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/defaultsecurityrules_server.go @@ -0,0 +1,156 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// DefaultSecurityRulesServer is a fake server for instances of the armnetwork.DefaultSecurityRulesClient type. +type DefaultSecurityRulesServer struct { + // Get is the fake for method DefaultSecurityRulesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, defaultSecurityRuleName string, options *armnetwork.DefaultSecurityRulesClientGetOptions) (resp azfake.Responder[armnetwork.DefaultSecurityRulesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method DefaultSecurityRulesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, networkSecurityGroupName string, options *armnetwork.DefaultSecurityRulesClientListOptions) (resp azfake.PagerResponder[armnetwork.DefaultSecurityRulesClientListResponse]) +} + +// NewDefaultSecurityRulesServerTransport creates a new instance of DefaultSecurityRulesServerTransport with the provided implementation. +// The returned DefaultSecurityRulesServerTransport instance is connected to an instance of armnetwork.DefaultSecurityRulesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewDefaultSecurityRulesServerTransport(srv *DefaultSecurityRulesServer) *DefaultSecurityRulesServerTransport { + return &DefaultSecurityRulesServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.DefaultSecurityRulesClientListResponse]](), + } +} + +// DefaultSecurityRulesServerTransport connects instances of armnetwork.DefaultSecurityRulesClient to instances of DefaultSecurityRulesServer. +// Don't use this type directly, use NewDefaultSecurityRulesServerTransport instead. +type DefaultSecurityRulesServerTransport struct { + srv *DefaultSecurityRulesServer + newListPager *tracker[azfake.PagerResponder[armnetwork.DefaultSecurityRulesClientListResponse]] +} + +// Do implements the policy.Transporter interface for DefaultSecurityRulesServerTransport. 
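The DefaultSecurityRulesServer defined just above exposes a NewListPager fake; below is a hedged sketch, not part of the generated file, of how its paging path could be exercised. The fake adds two pages through azfake.PagerResponder.AddPage and the generated transport injects nextLink values between them, so the client's pager walks both. Resource names and the test name are placeholders.

package armnetwork_test // illustrative only

import (
    "context"
    "net/http"
    "testing"

    "github.com/Azure/azure-sdk-for-go/sdk/azcore"
    "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
    azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
    "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func TestDefaultSecurityRulesListWithFake(t *testing.T) {
    srv := fake.DefaultSecurityRulesServer{
        NewListPager: func(resourceGroupName, networkSecurityGroupName string, options *armnetwork.DefaultSecurityRulesClientListOptions) (resp azfake.PagerResponder[armnetwork.DefaultSecurityRulesClientListResponse]) {
            // Two pages of one rule each; the transport wires up the nextLink.
            for _, name := range []string{"AllowVnetInBound", "DenyAllInBound"} {
                resp.AddPage(http.StatusOK, armnetwork.DefaultSecurityRulesClientListResponse{
                    SecurityRuleListResult: armnetwork.SecurityRuleListResult{
                        Value: []*armnetwork.SecurityRule{{Name: to.Ptr(name)}},
                    },
                }, nil)
            }
            return
        },
    }

    client, err := armnetwork.NewDefaultSecurityRulesClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
        ClientOptions: azcore.ClientOptions{Transporter: fake.NewDefaultSecurityRulesServerTransport(&srv)},
    })
    if err != nil {
        t.Fatal(err)
    }

    var rules int
    pager := client.NewListPager("fake-rg", "fake-nsg", nil)
    for pager.More() {
        page, err := pager.NextPage(context.Background())
        if err != nil {
            t.Fatal(err)
        }
        rules += len(page.Value)
    }
    if rules != 2 {
        t.Fatalf("expected 2 rules, got %d", rules)
    }
}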
+func (d *DefaultSecurityRulesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "DefaultSecurityRulesClient.Get": + resp, err = d.dispatchGet(req) + case "DefaultSecurityRulesClient.NewListPager": + resp, err = d.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (d *DefaultSecurityRulesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if d.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkSecurityGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/defaultSecurityRules/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkSecurityGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkSecurityGroupName")]) + if err != nil { + return nil, err + } + defaultSecurityRuleNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("defaultSecurityRuleName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.Get(req.Context(), resourceGroupNameParam, networkSecurityGroupNameParam, defaultSecurityRuleNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).SecurityRule, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (d *DefaultSecurityRulesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if d.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := d.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkSecurityGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/defaultSecurityRules` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkSecurityGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkSecurityGroupName")]) + if err != nil { + return nil, err + } + resp := d.srv.NewListPager(resourceGroupNameParam, networkSecurityGroupNameParam, nil) + newListPager = &resp + d.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.DefaultSecurityRulesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + d.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + d.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/dscpconfiguration_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/dscpconfiguration_server.go new file mode 100644 index 00000000000..5de861ba839 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/dscpconfiguration_server.go @@ -0,0 +1,297 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// DscpConfigurationServer is a fake server for instances of the armnetwork.DscpConfigurationClient type. 
+type DscpConfigurationServer struct { + // BeginCreateOrUpdate is the fake for method DscpConfigurationClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, dscpConfigurationName string, parameters armnetwork.DscpConfiguration, options *armnetwork.DscpConfigurationClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.DscpConfigurationClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method DscpConfigurationClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, dscpConfigurationName string, options *armnetwork.DscpConfigurationClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.DscpConfigurationClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method DscpConfigurationClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, dscpConfigurationName string, options *armnetwork.DscpConfigurationClientGetOptions) (resp azfake.Responder[armnetwork.DscpConfigurationClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method DscpConfigurationClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armnetwork.DscpConfigurationClientListOptions) (resp azfake.PagerResponder[armnetwork.DscpConfigurationClientListResponse]) + + // NewListAllPager is the fake for method DscpConfigurationClient.NewListAllPager + // HTTP status codes to indicate success: http.StatusOK + NewListAllPager func(options *armnetwork.DscpConfigurationClientListAllOptions) (resp azfake.PagerResponder[armnetwork.DscpConfigurationClientListAllResponse]) +} + +// NewDscpConfigurationServerTransport creates a new instance of DscpConfigurationServerTransport with the provided implementation. +// The returned DscpConfigurationServerTransport instance is connected to an instance of armnetwork.DscpConfigurationClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewDscpConfigurationServerTransport(srv *DscpConfigurationServer) *DscpConfigurationServerTransport { + return &DscpConfigurationServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.DscpConfigurationClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.DscpConfigurationClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.DscpConfigurationClientListResponse]](), + newListAllPager: newTracker[azfake.PagerResponder[armnetwork.DscpConfigurationClientListAllResponse]](), + } +} + +// DscpConfigurationServerTransport connects instances of armnetwork.DscpConfigurationClient to instances of DscpConfigurationServer. +// Don't use this type directly, use NewDscpConfigurationServerTransport instead. 
+type DscpConfigurationServerTransport struct { + srv *DscpConfigurationServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.DscpConfigurationClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.DscpConfigurationClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.DscpConfigurationClientListResponse]] + newListAllPager *tracker[azfake.PagerResponder[armnetwork.DscpConfigurationClientListAllResponse]] +} + +// Do implements the policy.Transporter interface for DscpConfigurationServerTransport. +func (d *DscpConfigurationServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "DscpConfigurationClient.BeginCreateOrUpdate": + resp, err = d.dispatchBeginCreateOrUpdate(req) + case "DscpConfigurationClient.BeginDelete": + resp, err = d.dispatchBeginDelete(req) + case "DscpConfigurationClient.Get": + resp, err = d.dispatchGet(req) + case "DscpConfigurationClient.NewListPager": + resp, err = d.dispatchNewListPager(req) + case "DscpConfigurationClient.NewListAllPager": + resp, err = d.dispatchNewListAllPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (d *DscpConfigurationServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if d.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := d.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/dscpConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.DscpConfiguration](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + dscpConfigurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("dscpConfigurationName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, dscpConfigurationNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + d.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + d.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + d.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (d *DscpConfigurationServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if d.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := d.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/dscpConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + dscpConfigurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("dscpConfigurationName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.BeginDelete(req.Context(), resourceGroupNameParam, dscpConfigurationNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + d.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + d.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + d.beginDelete.remove(req) + } + + return resp, nil +} + +func (d *DscpConfigurationServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if d.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/dscpConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + dscpConfigurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("dscpConfigurationName")]) + if err != nil { + return nil, err + } + respr, errRespr := d.srv.Get(req.Context(), resourceGroupNameParam, dscpConfigurationNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).DscpConfiguration, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (d *DscpConfigurationServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if d.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := d.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/dscpConfigurations` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := d.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + d.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.DscpConfigurationClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + d.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + d.newListPager.remove(req) + } + return resp, nil +} + +func (d *DscpConfigurationServerTransport) dispatchNewListAllPager(req *http.Request) (*http.Response, error) { + if d.srv.NewListAllPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAllPager not implemented")} + } + newListAllPager := d.newListAllPager.get(req) + if newListAllPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/dscpConfigurations` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := d.srv.NewListAllPager(nil) + newListAllPager = &resp + d.newListAllPager.add(req, newListAllPager) + server.PagerResponderInjectNextLinks(newListAllPager, req, func(page *armnetwork.DscpConfigurationClientListAllResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAllPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + d.newListAllPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAllPager) { + d.newListAllPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressroutecircuitauthorizations_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressroutecircuitauthorizations_server.go new file mode 100644 index 00000000000..35842663701 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressroutecircuitauthorizations_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ExpressRouteCircuitAuthorizationsServer is a fake server for instances of the armnetwork.ExpressRouteCircuitAuthorizationsClient type. +type ExpressRouteCircuitAuthorizationsServer struct { + // BeginCreateOrUpdate is the fake for method ExpressRouteCircuitAuthorizationsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, circuitName string, authorizationName string, authorizationParameters armnetwork.ExpressRouteCircuitAuthorization, options *armnetwork.ExpressRouteCircuitAuthorizationsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.ExpressRouteCircuitAuthorizationsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method ExpressRouteCircuitAuthorizationsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, circuitName string, authorizationName string, options *armnetwork.ExpressRouteCircuitAuthorizationsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.ExpressRouteCircuitAuthorizationsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method ExpressRouteCircuitAuthorizationsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, circuitName string, authorizationName string, options *armnetwork.ExpressRouteCircuitAuthorizationsClientGetOptions) (resp azfake.Responder[armnetwork.ExpressRouteCircuitAuthorizationsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method ExpressRouteCircuitAuthorizationsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, circuitName string, options *armnetwork.ExpressRouteCircuitAuthorizationsClientListOptions) (resp 
azfake.PagerResponder[armnetwork.ExpressRouteCircuitAuthorizationsClientListResponse]) +} + +// NewExpressRouteCircuitAuthorizationsServerTransport creates a new instance of ExpressRouteCircuitAuthorizationsServerTransport with the provided implementation. +// The returned ExpressRouteCircuitAuthorizationsServerTransport instance is connected to an instance of armnetwork.ExpressRouteCircuitAuthorizationsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewExpressRouteCircuitAuthorizationsServerTransport(srv *ExpressRouteCircuitAuthorizationsServer) *ExpressRouteCircuitAuthorizationsServerTransport { + return &ExpressRouteCircuitAuthorizationsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.ExpressRouteCircuitAuthorizationsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.ExpressRouteCircuitAuthorizationsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.ExpressRouteCircuitAuthorizationsClientListResponse]](), + } +} + +// ExpressRouteCircuitAuthorizationsServerTransport connects instances of armnetwork.ExpressRouteCircuitAuthorizationsClient to instances of ExpressRouteCircuitAuthorizationsServer. +// Don't use this type directly, use NewExpressRouteCircuitAuthorizationsServerTransport instead. +type ExpressRouteCircuitAuthorizationsServerTransport struct { + srv *ExpressRouteCircuitAuthorizationsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.ExpressRouteCircuitAuthorizationsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.ExpressRouteCircuitAuthorizationsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.ExpressRouteCircuitAuthorizationsClientListResponse]] +} + +// Do implements the policy.Transporter interface for ExpressRouteCircuitAuthorizationsServerTransport. 
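One more hedged sketch, again outside the generated file, this time for the long-running-operation path handled by the poller trackers declared just above: the fake BeginDelete queues one non-terminal 202 and a terminal 200 through azfake.PollerResponder, and the client's poller consumes them through the fake transport. The one-second polling frequency, resource names, and test name are assumptions for illustration only.

package armnetwork_test // illustrative only

import (
    "context"
    "net/http"
    "testing"
    "time"

    "github.com/Azure/azure-sdk-for-go/sdk/azcore"
    "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
    azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
    "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func TestExpressRouteCircuitAuthorizationDeleteWithFake(t *testing.T) {
    srv := fake.ExpressRouteCircuitAuthorizationsServer{
        BeginDelete: func(ctx context.Context, resourceGroupName, circuitName, authorizationName string, options *armnetwork.ExpressRouteCircuitAuthorizationsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.ExpressRouteCircuitAuthorizationsClientDeleteResponse], errResp azfake.ErrorResponder) {
            // One in-progress poll, then a terminal 200; both statuses are in the
            // dispatcher's accepted set for BeginDelete.
            resp.AddNonTerminalResponse(http.StatusAccepted, nil)
            resp.SetTerminalResponse(http.StatusOK, armnetwork.ExpressRouteCircuitAuthorizationsClientDeleteResponse{}, nil)
            return
        },
    }

    client, err := armnetwork.NewExpressRouteCircuitAuthorizationsClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
        ClientOptions: azcore.ClientOptions{Transporter: fake.NewExpressRouteCircuitAuthorizationsServerTransport(&srv)},
    })
    if err != nil {
        t.Fatal(err)
    }

    poller, err := client.BeginDelete(context.Background(), "fake-rg", "fake-circuit", "fake-auth", nil)
    if err != nil {
        t.Fatal(err)
    }
    // Poll quickly; the fake answers each poll immediately.
    if _, err = poller.PollUntilDone(context.Background(), &runtime.PollUntilDoneOptions{Frequency: time.Second}); err != nil {
        t.Fatal(err)
    }
}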
+func (e *ExpressRouteCircuitAuthorizationsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ExpressRouteCircuitAuthorizationsClient.BeginCreateOrUpdate": + resp, err = e.dispatchBeginCreateOrUpdate(req) + case "ExpressRouteCircuitAuthorizationsClient.BeginDelete": + resp, err = e.dispatchBeginDelete(req) + case "ExpressRouteCircuitAuthorizationsClient.Get": + resp, err = e.dispatchGet(req) + case "ExpressRouteCircuitAuthorizationsClient.NewListPager": + resp, err = e.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (e *ExpressRouteCircuitAuthorizationsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if e.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := e.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/authorizations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.ExpressRouteCircuitAuthorization](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + circuitNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("circuitName")]) + if err != nil { + return nil, err + } + authorizationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("authorizationName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, circuitNameParam, authorizationNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + e.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + e.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + e.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (e *ExpressRouteCircuitAuthorizationsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if e.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := e.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/authorizations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + circuitNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("circuitName")]) + if err != nil { + return nil, err + } + authorizationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("authorizationName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginDelete(req.Context(), resourceGroupNameParam, circuitNameParam, authorizationNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + e.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + e.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + e.beginDelete.remove(req) + } + + return resp, nil +} + +func (e *ExpressRouteCircuitAuthorizationsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if e.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/authorizations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + circuitNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("circuitName")]) + if err != nil { + return nil, err + } + authorizationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("authorizationName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.Get(req.Context(), resourceGroupNameParam, circuitNameParam, authorizationNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ExpressRouteCircuitAuthorization, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (e *ExpressRouteCircuitAuthorizationsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if e.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := e.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/authorizations` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + circuitNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("circuitName")]) + if err != nil { + return nil, err + } + resp := e.srv.NewListPager(resourceGroupNameParam, circuitNameParam, nil) + newListPager = &resp + e.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.ExpressRouteCircuitAuthorizationsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + e.newListPager.remove(req) + return nil, 
&nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + e.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressroutecircuitconnections_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressroutecircuitconnections_server.go new file mode 100644 index 00000000000..de1e1657f5c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressroutecircuitconnections_server.go @@ -0,0 +1,288 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ExpressRouteCircuitConnectionsServer is a fake server for instances of the armnetwork.ExpressRouteCircuitConnectionsClient type. +type ExpressRouteCircuitConnectionsServer struct { + // BeginCreateOrUpdate is the fake for method ExpressRouteCircuitConnectionsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, connectionName string, expressRouteCircuitConnectionParameters armnetwork.ExpressRouteCircuitConnection, options *armnetwork.ExpressRouteCircuitConnectionsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.ExpressRouteCircuitConnectionsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method ExpressRouteCircuitConnectionsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, connectionName string, options *armnetwork.ExpressRouteCircuitConnectionsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.ExpressRouteCircuitConnectionsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method ExpressRouteCircuitConnectionsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, connectionName string, options *armnetwork.ExpressRouteCircuitConnectionsClientGetOptions) (resp azfake.Responder[armnetwork.ExpressRouteCircuitConnectionsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method ExpressRouteCircuitConnectionsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, circuitName string, peeringName string, options *armnetwork.ExpressRouteCircuitConnectionsClientListOptions) (resp 
azfake.PagerResponder[armnetwork.ExpressRouteCircuitConnectionsClientListResponse]) +} + +// NewExpressRouteCircuitConnectionsServerTransport creates a new instance of ExpressRouteCircuitConnectionsServerTransport with the provided implementation. +// The returned ExpressRouteCircuitConnectionsServerTransport instance is connected to an instance of armnetwork.ExpressRouteCircuitConnectionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewExpressRouteCircuitConnectionsServerTransport(srv *ExpressRouteCircuitConnectionsServer) *ExpressRouteCircuitConnectionsServerTransport { + return &ExpressRouteCircuitConnectionsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.ExpressRouteCircuitConnectionsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.ExpressRouteCircuitConnectionsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.ExpressRouteCircuitConnectionsClientListResponse]](), + } +} + +// ExpressRouteCircuitConnectionsServerTransport connects instances of armnetwork.ExpressRouteCircuitConnectionsClient to instances of ExpressRouteCircuitConnectionsServer. +// Don't use this type directly, use NewExpressRouteCircuitConnectionsServerTransport instead. +type ExpressRouteCircuitConnectionsServerTransport struct { + srv *ExpressRouteCircuitConnectionsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.ExpressRouteCircuitConnectionsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.ExpressRouteCircuitConnectionsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.ExpressRouteCircuitConnectionsClientListResponse]] +} + +// Do implements the policy.Transporter interface for ExpressRouteCircuitConnectionsServerTransport. 
+func (e *ExpressRouteCircuitConnectionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ExpressRouteCircuitConnectionsClient.BeginCreateOrUpdate": + resp, err = e.dispatchBeginCreateOrUpdate(req) + case "ExpressRouteCircuitConnectionsClient.BeginDelete": + resp, err = e.dispatchBeginDelete(req) + case "ExpressRouteCircuitConnectionsClient.Get": + resp, err = e.dispatchGet(req) + case "ExpressRouteCircuitConnectionsClient.NewListPager": + resp, err = e.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (e *ExpressRouteCircuitConnectionsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if e.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := e.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/connections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.ExpressRouteCircuitConnection](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + circuitNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("circuitName")]) + if err != nil { + return nil, err + } + peeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peeringName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, circuitNameParam, peeringNameParam, connectionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + e.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + e.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + e.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (e *ExpressRouteCircuitConnectionsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if e.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := e.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/connections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + circuitNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("circuitName")]) + if err != nil { + return nil, err + } + peeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peeringName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginDelete(req.Context(), resourceGroupNameParam, circuitNameParam, peeringNameParam, connectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + e.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + e.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + e.beginDelete.remove(req) + } + + return resp, nil +} + +func (e *ExpressRouteCircuitConnectionsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if e.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/connections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + circuitNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("circuitName")]) + if err != nil { + return nil, err + } + peeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peeringName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.Get(req.Context(), resourceGroupNameParam, circuitNameParam, peeringNameParam, connectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ExpressRouteCircuitConnection, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (e *ExpressRouteCircuitConnectionsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if e.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := e.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/connections` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + circuitNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("circuitName")]) + if err != nil { + return nil, err + } + peeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peeringName")]) + if err != nil { + return nil, err + } + resp := e.srv.NewListPager(resourceGroupNameParam, circuitNameParam, peeringNameParam, nil) + newListPager = &resp + e.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.ExpressRouteCircuitConnectionsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + e.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + e.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressroutecircuitpeerings_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressroutecircuitpeerings_server.go new file mode 100644 index 00000000000..e11c03e5f4d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressroutecircuitpeerings_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ExpressRouteCircuitPeeringsServer is a fake server for instances of the armnetwork.ExpressRouteCircuitPeeringsClient type. +type ExpressRouteCircuitPeeringsServer struct { + // BeginCreateOrUpdate is the fake for method ExpressRouteCircuitPeeringsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, peeringParameters armnetwork.ExpressRouteCircuitPeering, options *armnetwork.ExpressRouteCircuitPeeringsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.ExpressRouteCircuitPeeringsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method ExpressRouteCircuitPeeringsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, options *armnetwork.ExpressRouteCircuitPeeringsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.ExpressRouteCircuitPeeringsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method ExpressRouteCircuitPeeringsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, options *armnetwork.ExpressRouteCircuitPeeringsClientGetOptions) (resp azfake.Responder[armnetwork.ExpressRouteCircuitPeeringsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method ExpressRouteCircuitPeeringsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, circuitName string, options *armnetwork.ExpressRouteCircuitPeeringsClientListOptions) (resp azfake.PagerResponder[armnetwork.ExpressRouteCircuitPeeringsClientListResponse]) +} + +// NewExpressRouteCircuitPeeringsServerTransport creates a new instance of ExpressRouteCircuitPeeringsServerTransport with the provided implementation. +// The returned ExpressRouteCircuitPeeringsServerTransport instance is connected to an instance of armnetwork.ExpressRouteCircuitPeeringsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewExpressRouteCircuitPeeringsServerTransport(srv *ExpressRouteCircuitPeeringsServer) *ExpressRouteCircuitPeeringsServerTransport { + return &ExpressRouteCircuitPeeringsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.ExpressRouteCircuitPeeringsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.ExpressRouteCircuitPeeringsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.ExpressRouteCircuitPeeringsClientListResponse]](), + } +} + +// ExpressRouteCircuitPeeringsServerTransport connects instances of armnetwork.ExpressRouteCircuitPeeringsClient to instances of ExpressRouteCircuitPeeringsServer. 
+// Don't use this type directly, use NewExpressRouteCircuitPeeringsServerTransport instead. +type ExpressRouteCircuitPeeringsServerTransport struct { + srv *ExpressRouteCircuitPeeringsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.ExpressRouteCircuitPeeringsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.ExpressRouteCircuitPeeringsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.ExpressRouteCircuitPeeringsClientListResponse]] +} + +// Do implements the policy.Transporter interface for ExpressRouteCircuitPeeringsServerTransport. +func (e *ExpressRouteCircuitPeeringsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ExpressRouteCircuitPeeringsClient.BeginCreateOrUpdate": + resp, err = e.dispatchBeginCreateOrUpdate(req) + case "ExpressRouteCircuitPeeringsClient.BeginDelete": + resp, err = e.dispatchBeginDelete(req) + case "ExpressRouteCircuitPeeringsClient.Get": + resp, err = e.dispatchGet(req) + case "ExpressRouteCircuitPeeringsClient.NewListPager": + resp, err = e.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (e *ExpressRouteCircuitPeeringsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if e.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := e.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.ExpressRouteCircuitPeering](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + circuitNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("circuitName")]) + if err != nil { + return nil, err + } + peeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peeringName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, circuitNameParam, peeringNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + e.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + e.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + e.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (e *ExpressRouteCircuitPeeringsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if e.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := e.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + circuitNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("circuitName")]) + if err != nil { + return nil, err + } + peeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peeringName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginDelete(req.Context(), resourceGroupNameParam, circuitNameParam, peeringNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + e.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + e.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + e.beginDelete.remove(req) + } + + return resp, nil +} + +func (e *ExpressRouteCircuitPeeringsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if e.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + circuitNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("circuitName")]) + if err != nil { + return nil, err + } + peeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peeringName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.Get(req.Context(), resourceGroupNameParam, circuitNameParam, peeringNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ExpressRouteCircuitPeering, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (e *ExpressRouteCircuitPeeringsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if e.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := e.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + circuitNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("circuitName")]) + if err != nil { + return nil, err + } + resp := e.srv.NewListPager(resourceGroupNameParam, circuitNameParam, nil) + newListPager = &resp + e.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.ExpressRouteCircuitPeeringsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + e.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + e.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressroutecircuits_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressroutecircuits_server.go new file mode 100644 index 00000000000..00a8a93d211 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressroutecircuits_server.go @@ -0,0 +1,602 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ExpressRouteCircuitsServer is a fake server for instances of the armnetwork.ExpressRouteCircuitsClient type. +type ExpressRouteCircuitsServer struct { + // BeginCreateOrUpdate is the fake for method ExpressRouteCircuitsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, circuitName string, parameters armnetwork.ExpressRouteCircuit, options *armnetwork.ExpressRouteCircuitsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.ExpressRouteCircuitsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method ExpressRouteCircuitsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, circuitName string, options *armnetwork.ExpressRouteCircuitsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.ExpressRouteCircuitsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method ExpressRouteCircuitsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, circuitName string, options *armnetwork.ExpressRouteCircuitsClientGetOptions) (resp azfake.Responder[armnetwork.ExpressRouteCircuitsClientGetResponse], errResp azfake.ErrorResponder) + + // GetPeeringStats is the fake for method ExpressRouteCircuitsClient.GetPeeringStats + // HTTP status codes to indicate success: http.StatusOK + GetPeeringStats func(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, options *armnetwork.ExpressRouteCircuitsClientGetPeeringStatsOptions) (resp azfake.Responder[armnetwork.ExpressRouteCircuitsClientGetPeeringStatsResponse], errResp azfake.ErrorResponder) + + // GetStats is the fake for method ExpressRouteCircuitsClient.GetStats + // HTTP status codes to indicate success: http.StatusOK + GetStats func(ctx context.Context, resourceGroupName string, circuitName string, options 
*armnetwork.ExpressRouteCircuitsClientGetStatsOptions) (resp azfake.Responder[armnetwork.ExpressRouteCircuitsClientGetStatsResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method ExpressRouteCircuitsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armnetwork.ExpressRouteCircuitsClientListOptions) (resp azfake.PagerResponder[armnetwork.ExpressRouteCircuitsClientListResponse]) + + // NewListAllPager is the fake for method ExpressRouteCircuitsClient.NewListAllPager + // HTTP status codes to indicate success: http.StatusOK + NewListAllPager func(options *armnetwork.ExpressRouteCircuitsClientListAllOptions) (resp azfake.PagerResponder[armnetwork.ExpressRouteCircuitsClientListAllResponse]) + + // BeginListArpTable is the fake for method ExpressRouteCircuitsClient.BeginListArpTable + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginListArpTable func(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, devicePath string, options *armnetwork.ExpressRouteCircuitsClientBeginListArpTableOptions) (resp azfake.PollerResponder[armnetwork.ExpressRouteCircuitsClientListArpTableResponse], errResp azfake.ErrorResponder) + + // BeginListRoutesTable is the fake for method ExpressRouteCircuitsClient.BeginListRoutesTable + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginListRoutesTable func(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, devicePath string, options *armnetwork.ExpressRouteCircuitsClientBeginListRoutesTableOptions) (resp azfake.PollerResponder[armnetwork.ExpressRouteCircuitsClientListRoutesTableResponse], errResp azfake.ErrorResponder) + + // BeginListRoutesTableSummary is the fake for method ExpressRouteCircuitsClient.BeginListRoutesTableSummary + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginListRoutesTableSummary func(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, devicePath string, options *armnetwork.ExpressRouteCircuitsClientBeginListRoutesTableSummaryOptions) (resp azfake.PollerResponder[armnetwork.ExpressRouteCircuitsClientListRoutesTableSummaryResponse], errResp azfake.ErrorResponder) + + // UpdateTags is the fake for method ExpressRouteCircuitsClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, circuitName string, parameters armnetwork.TagsObject, options *armnetwork.ExpressRouteCircuitsClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.ExpressRouteCircuitsClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewExpressRouteCircuitsServerTransport creates a new instance of ExpressRouteCircuitsServerTransport with the provided implementation. +// The returned ExpressRouteCircuitsServerTransport instance is connected to an instance of armnetwork.ExpressRouteCircuitsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
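As with the other fakes in this diff, the comment above only states the wiring; a minimal, purely illustrative sketch of stubbing the Get fake for ExpressRouteCircuitsServer follows, with the same caveats as the earlier sketch (placeholder names throughout, azfake helper signatures to be verified against the SDK).

// Illustrative test sketch only; not part of the vendored code below.
package armnetwork_test

import (
	"context"
	"net/http"
	"testing"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func TestExpressRouteCircuitsGetFake(t *testing.T) {
	srv := fake.ExpressRouteCircuitsServer{
		Get: func(ctx context.Context, resourceGroupName, circuitName string, options *armnetwork.ExpressRouteCircuitsClientGetOptions) (resp azfake.Responder[armnetwork.ExpressRouteCircuitsClientGetResponse], errResp azfake.ErrorResponder) {
			// Respond with a made-up circuit; errResp is left unset to signal success.
			resp.SetResponse(http.StatusOK, armnetwork.ExpressRouteCircuitsClientGetResponse{
				ExpressRouteCircuit: armnetwork.ExpressRouteCircuit{Name: to.Ptr(circuitName)},
			}, nil)
			return
		},
	}
	client, err := armnetwork.NewExpressRouteCircuitsClient("subscription-id", azfake.NewTokenCredential(), &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transporter: fake.NewExpressRouteCircuitsServerTransport(&srv)},
	})
	if err != nil {
		t.Fatal(err)
	}
	got, err := client.Get(context.Background(), "rg", "circuit1", nil)
	if err != nil {
		t.Fatal(err)
	}
	if got.Name == nil || *got.Name != "circuit1" {
		t.Fatalf("unexpected circuit name: %v", got.Name)
	}
}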
+func NewExpressRouteCircuitsServerTransport(srv *ExpressRouteCircuitsServer) *ExpressRouteCircuitsServerTransport { + return &ExpressRouteCircuitsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.ExpressRouteCircuitsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.ExpressRouteCircuitsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.ExpressRouteCircuitsClientListResponse]](), + newListAllPager: newTracker[azfake.PagerResponder[armnetwork.ExpressRouteCircuitsClientListAllResponse]](), + beginListArpTable: newTracker[azfake.PollerResponder[armnetwork.ExpressRouteCircuitsClientListArpTableResponse]](), + beginListRoutesTable: newTracker[azfake.PollerResponder[armnetwork.ExpressRouteCircuitsClientListRoutesTableResponse]](), + beginListRoutesTableSummary: newTracker[azfake.PollerResponder[armnetwork.ExpressRouteCircuitsClientListRoutesTableSummaryResponse]](), + } +} + +// ExpressRouteCircuitsServerTransport connects instances of armnetwork.ExpressRouteCircuitsClient to instances of ExpressRouteCircuitsServer. +// Don't use this type directly, use NewExpressRouteCircuitsServerTransport instead. +type ExpressRouteCircuitsServerTransport struct { + srv *ExpressRouteCircuitsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.ExpressRouteCircuitsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.ExpressRouteCircuitsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.ExpressRouteCircuitsClientListResponse]] + newListAllPager *tracker[azfake.PagerResponder[armnetwork.ExpressRouteCircuitsClientListAllResponse]] + beginListArpTable *tracker[azfake.PollerResponder[armnetwork.ExpressRouteCircuitsClientListArpTableResponse]] + beginListRoutesTable *tracker[azfake.PollerResponder[armnetwork.ExpressRouteCircuitsClientListRoutesTableResponse]] + beginListRoutesTableSummary *tracker[azfake.PollerResponder[armnetwork.ExpressRouteCircuitsClientListRoutesTableSummaryResponse]] +} + +// Do implements the policy.Transporter interface for ExpressRouteCircuitsServerTransport. 
+func (e *ExpressRouteCircuitsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ExpressRouteCircuitsClient.BeginCreateOrUpdate": + resp, err = e.dispatchBeginCreateOrUpdate(req) + case "ExpressRouteCircuitsClient.BeginDelete": + resp, err = e.dispatchBeginDelete(req) + case "ExpressRouteCircuitsClient.Get": + resp, err = e.dispatchGet(req) + case "ExpressRouteCircuitsClient.GetPeeringStats": + resp, err = e.dispatchGetPeeringStats(req) + case "ExpressRouteCircuitsClient.GetStats": + resp, err = e.dispatchGetStats(req) + case "ExpressRouteCircuitsClient.NewListPager": + resp, err = e.dispatchNewListPager(req) + case "ExpressRouteCircuitsClient.NewListAllPager": + resp, err = e.dispatchNewListAllPager(req) + case "ExpressRouteCircuitsClient.BeginListArpTable": + resp, err = e.dispatchBeginListArpTable(req) + case "ExpressRouteCircuitsClient.BeginListRoutesTable": + resp, err = e.dispatchBeginListRoutesTable(req) + case "ExpressRouteCircuitsClient.BeginListRoutesTableSummary": + resp, err = e.dispatchBeginListRoutesTableSummary(req) + case "ExpressRouteCircuitsClient.UpdateTags": + resp, err = e.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (e *ExpressRouteCircuitsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if e.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := e.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.ExpressRouteCircuit](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + circuitNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("circuitName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, circuitNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + e.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + e.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + e.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (e *ExpressRouteCircuitsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if e.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := e.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + circuitNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("circuitName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginDelete(req.Context(), resourceGroupNameParam, circuitNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + e.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + e.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + e.beginDelete.remove(req) + } + + return resp, nil +} + +func (e *ExpressRouteCircuitsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if e.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + circuitNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("circuitName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.Get(req.Context(), resourceGroupNameParam, circuitNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ExpressRouteCircuit, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (e *ExpressRouteCircuitsServerTransport) dispatchGetPeeringStats(req *http.Request) (*http.Response, error) { + if e.srv.GetPeeringStats == nil { + return nil, &nonRetriableError{errors.New("fake for method GetPeeringStats not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/stats` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + circuitNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("circuitName")]) + if err != nil { + return nil, err + } + peeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peeringName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.GetPeeringStats(req.Context(), resourceGroupNameParam, circuitNameParam, peeringNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ExpressRouteCircuitStats, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (e *ExpressRouteCircuitsServerTransport) dispatchGetStats(req *http.Request) (*http.Response, error) { + if e.srv.GetStats == nil { + return nil, &nonRetriableError{errors.New("fake for method GetStats not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/stats` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + circuitNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("circuitName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.GetStats(req.Context(), resourceGroupNameParam, circuitNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ExpressRouteCircuitStats, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (e *ExpressRouteCircuitsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if e.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := e.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := e.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + e.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.ExpressRouteCircuitsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + e.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + e.newListPager.remove(req) + } + return resp, nil +} + +func (e *ExpressRouteCircuitsServerTransport) dispatchNewListAllPager(req *http.Request) (*http.Response, error) { + if e.srv.NewListAllPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAllPager not implemented")} + } + newListAllPager := e.newListAllPager.get(req) + if newListAllPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := e.srv.NewListAllPager(nil) + newListAllPager = &resp + e.newListAllPager.add(req, newListAllPager) + server.PagerResponderInjectNextLinks(newListAllPager, req, func(page *armnetwork.ExpressRouteCircuitsClientListAllResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAllPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + e.newListAllPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAllPager) { + e.newListAllPager.remove(req) + } + return resp, nil +} + +func (e *ExpressRouteCircuitsServerTransport) dispatchBeginListArpTable(req *http.Request) (*http.Response, error) { + if e.srv.BeginListArpTable == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginListArpTable not implemented")} + } + beginListArpTable := e.beginListArpTable.get(req) + if beginListArpTable == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/arpTables/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + circuitNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("circuitName")]) + if err != nil { + return nil, err + } + peeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peeringName")]) + if err != nil { + return nil, err + } + devicePathParam, err := url.PathUnescape(matches[regex.SubexpIndex("devicePath")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginListArpTable(req.Context(), resourceGroupNameParam, circuitNameParam, peeringNameParam, devicePathParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginListArpTable = &respr + e.beginListArpTable.add(req, beginListArpTable) + } + + resp, err := server.PollerResponderNext(beginListArpTable, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + e.beginListArpTable.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginListArpTable) { + e.beginListArpTable.remove(req) + } + + return resp, nil +} + +func (e *ExpressRouteCircuitsServerTransport) dispatchBeginListRoutesTable(req *http.Request) (*http.Response, error) { + if e.srv.BeginListRoutesTable == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginListRoutesTable not implemented")} + } + beginListRoutesTable := e.beginListRoutesTable.get(req) + if beginListRoutesTable == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/routeTables/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + circuitNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("circuitName")]) + if err != nil { + return nil, err + } + peeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peeringName")]) + if err != nil { + return nil, err + } + devicePathParam, err := url.PathUnescape(matches[regex.SubexpIndex("devicePath")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginListRoutesTable(req.Context(), resourceGroupNameParam, circuitNameParam, peeringNameParam, devicePathParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginListRoutesTable = &respr + e.beginListRoutesTable.add(req, beginListRoutesTable) + } + + resp, err := server.PollerResponderNext(beginListRoutesTable, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + e.beginListRoutesTable.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginListRoutesTable) { + e.beginListRoutesTable.remove(req) + } + + return resp, nil +} + +func (e *ExpressRouteCircuitsServerTransport) dispatchBeginListRoutesTableSummary(req *http.Request) (*http.Response, error) { + if e.srv.BeginListRoutesTableSummary == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginListRoutesTableSummary not implemented")} + } + beginListRoutesTableSummary := e.beginListRoutesTableSummary.get(req) + if beginListRoutesTableSummary == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/routeTablesSummary/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + circuitNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("circuitName")]) + if err != nil { + return nil, err + } + peeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peeringName")]) + if err != nil { + return nil, err + } + devicePathParam, err := url.PathUnescape(matches[regex.SubexpIndex("devicePath")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginListRoutesTableSummary(req.Context(), resourceGroupNameParam, circuitNameParam, peeringNameParam, devicePathParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginListRoutesTableSummary = &respr + e.beginListRoutesTableSummary.add(req, beginListRoutesTableSummary) + } + + resp, err := server.PollerResponderNext(beginListRoutesTableSummary, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + e.beginListRoutesTableSummary.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginListRoutesTableSummary) { + e.beginListRoutesTableSummary.remove(req) + } + + return resp, nil +} + +func (e *ExpressRouteCircuitsServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if e.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + circuitNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("circuitName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.UpdateTags(req.Context(), resourceGroupNameParam, circuitNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ExpressRouteCircuit, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressrouteconnections_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressrouteconnections_server.go new file mode 100644 index 00000000000..48e96852e39 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressrouteconnections_server.go @@ -0,0 +1,261 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ExpressRouteConnectionsServer is a fake server for instances of the armnetwork.ExpressRouteConnectionsClient type. 
+type ExpressRouteConnectionsServer struct { + // BeginCreateOrUpdate is the fake for method ExpressRouteConnectionsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, expressRouteGatewayName string, connectionName string, putExpressRouteConnectionParameters armnetwork.ExpressRouteConnection, options *armnetwork.ExpressRouteConnectionsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.ExpressRouteConnectionsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method ExpressRouteConnectionsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, expressRouteGatewayName string, connectionName string, options *armnetwork.ExpressRouteConnectionsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.ExpressRouteConnectionsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method ExpressRouteConnectionsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, expressRouteGatewayName string, connectionName string, options *armnetwork.ExpressRouteConnectionsClientGetOptions) (resp azfake.Responder[armnetwork.ExpressRouteConnectionsClientGetResponse], errResp azfake.ErrorResponder) + + // List is the fake for method ExpressRouteConnectionsClient.List + // HTTP status codes to indicate success: http.StatusOK + List func(ctx context.Context, resourceGroupName string, expressRouteGatewayName string, options *armnetwork.ExpressRouteConnectionsClientListOptions) (resp azfake.Responder[armnetwork.ExpressRouteConnectionsClientListResponse], errResp azfake.ErrorResponder) +} + +// NewExpressRouteConnectionsServerTransport creates a new instance of ExpressRouteConnectionsServerTransport with the provided implementation. +// The returned ExpressRouteConnectionsServerTransport instance is connected to an instance of armnetwork.ExpressRouteConnectionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewExpressRouteConnectionsServerTransport(srv *ExpressRouteConnectionsServer) *ExpressRouteConnectionsServerTransport { + return &ExpressRouteConnectionsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.ExpressRouteConnectionsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.ExpressRouteConnectionsClientDeleteResponse]](), + } +} + +// ExpressRouteConnectionsServerTransport connects instances of armnetwork.ExpressRouteConnectionsClient to instances of ExpressRouteConnectionsServer. +// Don't use this type directly, use NewExpressRouteConnectionsServerTransport instead. +type ExpressRouteConnectionsServerTransport struct { + srv *ExpressRouteConnectionsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.ExpressRouteConnectionsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.ExpressRouteConnectionsClientDeleteResponse]] +} + +// Do implements the policy.Transporter interface for ExpressRouteConnectionsServerTransport. 
+func (e *ExpressRouteConnectionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ExpressRouteConnectionsClient.BeginCreateOrUpdate": + resp, err = e.dispatchBeginCreateOrUpdate(req) + case "ExpressRouteConnectionsClient.BeginDelete": + resp, err = e.dispatchBeginDelete(req) + case "ExpressRouteConnectionsClient.Get": + resp, err = e.dispatchGet(req) + case "ExpressRouteConnectionsClient.List": + resp, err = e.dispatchList(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (e *ExpressRouteConnectionsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if e.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := e.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/expressRouteConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.ExpressRouteConnection](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + expressRouteGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("expressRouteGatewayName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, expressRouteGatewayNameParam, connectionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + e.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + e.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + e.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (e *ExpressRouteConnectionsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if e.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := e.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteGateways/(?P<expressRouteGatewayName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/expressRouteConnections/(?P<connectionName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + expressRouteGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("expressRouteGatewayName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginDelete(req.Context(), resourceGroupNameParam, expressRouteGatewayNameParam, connectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + e.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + e.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + e.beginDelete.remove(req) + } + + return resp, nil +} + +func (e *ExpressRouteConnectionsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if e.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteGateways/(?P<expressRouteGatewayName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/expressRouteConnections/(?P<connectionName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + expressRouteGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("expressRouteGatewayName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.Get(req.Context(), resourceGroupNameParam, expressRouteGatewayNameParam, connectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ExpressRouteConnection, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (e *ExpressRouteConnectionsServerTransport) dispatchList(req *http.Request) (*http.Response, error) { + if e.srv.List == nil { + return nil, &nonRetriableError{errors.New("fake for method List not implemented")} + } + const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteGateways/(?P<expressRouteGatewayName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/expressRouteConnections` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + expressRouteGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("expressRouteGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.List(req.Context(), resourceGroupNameParam, expressRouteGatewayNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ExpressRouteConnectionList, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressroutecrossconnectionpeerings_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressroutecrossconnectionpeerings_server.go new file mode 100644 index 00000000000..8709269342e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressroutecrossconnectionpeerings_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ExpressRouteCrossConnectionPeeringsServer is a fake server for instances of the armnetwork.ExpressRouteCrossConnectionPeeringsClient type. +type ExpressRouteCrossConnectionPeeringsServer struct { + // BeginCreateOrUpdate is the fake for method ExpressRouteCrossConnectionPeeringsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, crossConnectionName string, peeringName string, peeringParameters armnetwork.ExpressRouteCrossConnectionPeering, options *armnetwork.ExpressRouteCrossConnectionPeeringsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.ExpressRouteCrossConnectionPeeringsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method ExpressRouteCrossConnectionPeeringsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, crossConnectionName string, peeringName string, options *armnetwork.ExpressRouteCrossConnectionPeeringsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.ExpressRouteCrossConnectionPeeringsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method ExpressRouteCrossConnectionPeeringsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, crossConnectionName string, peeringName string, options *armnetwork.ExpressRouteCrossConnectionPeeringsClientGetOptions) (resp azfake.Responder[armnetwork.ExpressRouteCrossConnectionPeeringsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method ExpressRouteCrossConnectionPeeringsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, crossConnectionName string, options 
*armnetwork.ExpressRouteCrossConnectionPeeringsClientListOptions) (resp azfake.PagerResponder[armnetwork.ExpressRouteCrossConnectionPeeringsClientListResponse]) +} + +// NewExpressRouteCrossConnectionPeeringsServerTransport creates a new instance of ExpressRouteCrossConnectionPeeringsServerTransport with the provided implementation. +// The returned ExpressRouteCrossConnectionPeeringsServerTransport instance is connected to an instance of armnetwork.ExpressRouteCrossConnectionPeeringsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewExpressRouteCrossConnectionPeeringsServerTransport(srv *ExpressRouteCrossConnectionPeeringsServer) *ExpressRouteCrossConnectionPeeringsServerTransport { + return &ExpressRouteCrossConnectionPeeringsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.ExpressRouteCrossConnectionPeeringsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.ExpressRouteCrossConnectionPeeringsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.ExpressRouteCrossConnectionPeeringsClientListResponse]](), + } +} + +// ExpressRouteCrossConnectionPeeringsServerTransport connects instances of armnetwork.ExpressRouteCrossConnectionPeeringsClient to instances of ExpressRouteCrossConnectionPeeringsServer. +// Don't use this type directly, use NewExpressRouteCrossConnectionPeeringsServerTransport instead. +type ExpressRouteCrossConnectionPeeringsServerTransport struct { + srv *ExpressRouteCrossConnectionPeeringsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.ExpressRouteCrossConnectionPeeringsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.ExpressRouteCrossConnectionPeeringsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.ExpressRouteCrossConnectionPeeringsClientListResponse]] +} + +// Do implements the policy.Transporter interface for ExpressRouteCrossConnectionPeeringsServerTransport. 
+func (e *ExpressRouteCrossConnectionPeeringsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ExpressRouteCrossConnectionPeeringsClient.BeginCreateOrUpdate": + resp, err = e.dispatchBeginCreateOrUpdate(req) + case "ExpressRouteCrossConnectionPeeringsClient.BeginDelete": + resp, err = e.dispatchBeginDelete(req) + case "ExpressRouteCrossConnectionPeeringsClient.Get": + resp, err = e.dispatchGet(req) + case "ExpressRouteCrossConnectionPeeringsClient.NewListPager": + resp, err = e.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (e *ExpressRouteCrossConnectionPeeringsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if e.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := e.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCrossConnections/(?P<crossConnectionName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings/(?P<peeringName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.ExpressRouteCrossConnectionPeering](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + crossConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("crossConnectionName")]) + if err != nil { + return nil, err + } + peeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peeringName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, crossConnectionNameParam, peeringNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + e.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + e.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + e.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (e *ExpressRouteCrossConnectionPeeringsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if e.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := e.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCrossConnections/(?P<crossConnectionName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings/(?P<peeringName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + crossConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("crossConnectionName")]) + if err != nil { + return nil, err + } + peeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peeringName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginDelete(req.Context(), resourceGroupNameParam, crossConnectionNameParam, peeringNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + e.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + e.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + e.beginDelete.remove(req) + } + + return resp, nil +} + +func (e *ExpressRouteCrossConnectionPeeringsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if e.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCrossConnections/(?P<crossConnectionName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings/(?P<peeringName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + crossConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("crossConnectionName")]) + if err != nil { + return nil, err + } + peeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peeringName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.Get(req.Context(), resourceGroupNameParam, crossConnectionNameParam, peeringNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ExpressRouteCrossConnectionPeering, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (e *ExpressRouteCrossConnectionPeeringsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if e.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := e.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCrossConnections/(?P<crossConnectionName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + crossConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("crossConnectionName")]) + if err != nil { + return nil, err + } + resp := e.srv.NewListPager(resourceGroupNameParam, crossConnectionNameParam, nil) + newListPager = &resp + e.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.ExpressRouteCrossConnectionPeeringsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + e.newListPager.remove(req) + 
return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + e.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressroutecrossconnections_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressroutecrossconnections_server.go new file mode 100644 index 00000000000..457a74e14d2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressroutecrossconnections_server.go @@ -0,0 +1,468 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ExpressRouteCrossConnectionsServer is a fake server for instances of the armnetwork.ExpressRouteCrossConnectionsClient type. +type ExpressRouteCrossConnectionsServer struct { + // BeginCreateOrUpdate is the fake for method ExpressRouteCrossConnectionsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, crossConnectionName string, parameters armnetwork.ExpressRouteCrossConnection, options *armnetwork.ExpressRouteCrossConnectionsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.ExpressRouteCrossConnectionsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method ExpressRouteCrossConnectionsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, crossConnectionName string, options *armnetwork.ExpressRouteCrossConnectionsClientGetOptions) (resp azfake.Responder[armnetwork.ExpressRouteCrossConnectionsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method ExpressRouteCrossConnectionsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armnetwork.ExpressRouteCrossConnectionsClientListOptions) (resp azfake.PagerResponder[armnetwork.ExpressRouteCrossConnectionsClientListResponse]) + + // BeginListArpTable is the fake for method ExpressRouteCrossConnectionsClient.BeginListArpTable + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginListArpTable func(ctx context.Context, resourceGroupName string, crossConnectionName string, peeringName string, devicePath string, options *armnetwork.ExpressRouteCrossConnectionsClientBeginListArpTableOptions) (resp azfake.PollerResponder[armnetwork.ExpressRouteCrossConnectionsClientListArpTableResponse], errResp azfake.ErrorResponder) + + // NewListByResourceGroupPager is the fake for method 
ExpressRouteCrossConnectionsClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armnetwork.ExpressRouteCrossConnectionsClientListByResourceGroupOptions) (resp azfake.PagerResponder[armnetwork.ExpressRouteCrossConnectionsClientListByResourceGroupResponse]) + + // BeginListRoutesTable is the fake for method ExpressRouteCrossConnectionsClient.BeginListRoutesTable + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginListRoutesTable func(ctx context.Context, resourceGroupName string, crossConnectionName string, peeringName string, devicePath string, options *armnetwork.ExpressRouteCrossConnectionsClientBeginListRoutesTableOptions) (resp azfake.PollerResponder[armnetwork.ExpressRouteCrossConnectionsClientListRoutesTableResponse], errResp azfake.ErrorResponder) + + // BeginListRoutesTableSummary is the fake for method ExpressRouteCrossConnectionsClient.BeginListRoutesTableSummary + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginListRoutesTableSummary func(ctx context.Context, resourceGroupName string, crossConnectionName string, peeringName string, devicePath string, options *armnetwork.ExpressRouteCrossConnectionsClientBeginListRoutesTableSummaryOptions) (resp azfake.PollerResponder[armnetwork.ExpressRouteCrossConnectionsClientListRoutesTableSummaryResponse], errResp azfake.ErrorResponder) + + // UpdateTags is the fake for method ExpressRouteCrossConnectionsClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, crossConnectionName string, crossConnectionParameters armnetwork.TagsObject, options *armnetwork.ExpressRouteCrossConnectionsClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.ExpressRouteCrossConnectionsClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewExpressRouteCrossConnectionsServerTransport creates a new instance of ExpressRouteCrossConnectionsServerTransport with the provided implementation. +// The returned ExpressRouteCrossConnectionsServerTransport instance is connected to an instance of armnetwork.ExpressRouteCrossConnectionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewExpressRouteCrossConnectionsServerTransport(srv *ExpressRouteCrossConnectionsServer) *ExpressRouteCrossConnectionsServerTransport { + return &ExpressRouteCrossConnectionsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.ExpressRouteCrossConnectionsClientCreateOrUpdateResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.ExpressRouteCrossConnectionsClientListResponse]](), + beginListArpTable: newTracker[azfake.PollerResponder[armnetwork.ExpressRouteCrossConnectionsClientListArpTableResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armnetwork.ExpressRouteCrossConnectionsClientListByResourceGroupResponse]](), + beginListRoutesTable: newTracker[azfake.PollerResponder[armnetwork.ExpressRouteCrossConnectionsClientListRoutesTableResponse]](), + beginListRoutesTableSummary: newTracker[azfake.PollerResponder[armnetwork.ExpressRouteCrossConnectionsClientListRoutesTableSummaryResponse]](), + } +} + +// ExpressRouteCrossConnectionsServerTransport connects instances of armnetwork.ExpressRouteCrossConnectionsClient to instances of ExpressRouteCrossConnectionsServer. 
+// Don't use this type directly, use NewExpressRouteCrossConnectionsServerTransport instead. +type ExpressRouteCrossConnectionsServerTransport struct { + srv *ExpressRouteCrossConnectionsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.ExpressRouteCrossConnectionsClientCreateOrUpdateResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.ExpressRouteCrossConnectionsClientListResponse]] + beginListArpTable *tracker[azfake.PollerResponder[armnetwork.ExpressRouteCrossConnectionsClientListArpTableResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armnetwork.ExpressRouteCrossConnectionsClientListByResourceGroupResponse]] + beginListRoutesTable *tracker[azfake.PollerResponder[armnetwork.ExpressRouteCrossConnectionsClientListRoutesTableResponse]] + beginListRoutesTableSummary *tracker[azfake.PollerResponder[armnetwork.ExpressRouteCrossConnectionsClientListRoutesTableSummaryResponse]] +} + +// Do implements the policy.Transporter interface for ExpressRouteCrossConnectionsServerTransport. +func (e *ExpressRouteCrossConnectionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ExpressRouteCrossConnectionsClient.BeginCreateOrUpdate": + resp, err = e.dispatchBeginCreateOrUpdate(req) + case "ExpressRouteCrossConnectionsClient.Get": + resp, err = e.dispatchGet(req) + case "ExpressRouteCrossConnectionsClient.NewListPager": + resp, err = e.dispatchNewListPager(req) + case "ExpressRouteCrossConnectionsClient.BeginListArpTable": + resp, err = e.dispatchBeginListArpTable(req) + case "ExpressRouteCrossConnectionsClient.NewListByResourceGroupPager": + resp, err = e.dispatchNewListByResourceGroupPager(req) + case "ExpressRouteCrossConnectionsClient.BeginListRoutesTable": + resp, err = e.dispatchBeginListRoutesTable(req) + case "ExpressRouteCrossConnectionsClient.BeginListRoutesTableSummary": + resp, err = e.dispatchBeginListRoutesTableSummary(req) + case "ExpressRouteCrossConnectionsClient.UpdateTags": + resp, err = e.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (e *ExpressRouteCrossConnectionsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if e.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := e.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCrossConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.ExpressRouteCrossConnection](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + crossConnectionNameParam, err := 
url.PathUnescape(matches[regex.SubexpIndex("crossConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, crossConnectionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + e.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK}, resp.StatusCode) { + e.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + e.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (e *ExpressRouteCrossConnectionsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if e.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCrossConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + crossConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("crossConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.Get(req.Context(), resourceGroupNameParam, crossConnectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ExpressRouteCrossConnection, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (e *ExpressRouteCrossConnectionsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if e.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := e.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCrossConnections` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := e.srv.NewListPager(nil) + newListPager = &resp + e.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.ExpressRouteCrossConnectionsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + e.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + e.newListPager.remove(req) + } + return resp, nil +} + +func (e *ExpressRouteCrossConnectionsServerTransport) dispatchBeginListArpTable(req *http.Request) (*http.Response, error) { + if e.srv.BeginListArpTable == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginListArpTable not implemented")} + } + beginListArpTable := e.beginListArpTable.get(req) + if beginListArpTable == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCrossConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/arpTables/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + crossConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("crossConnectionName")]) + if err != nil { + return nil, err + } + peeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peeringName")]) + if err != nil { + return nil, err + } + devicePathParam, err := url.PathUnescape(matches[regex.SubexpIndex("devicePath")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginListArpTable(req.Context(), resourceGroupNameParam, crossConnectionNameParam, peeringNameParam, devicePathParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginListArpTable = &respr + e.beginListArpTable.add(req, beginListArpTable) + } + + resp, err := server.PollerResponderNext(beginListArpTable, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + 
e.beginListArpTable.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginListArpTable) { + e.beginListArpTable.remove(req) + } + + return resp, nil +} + +func (e *ExpressRouteCrossConnectionsServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if e.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := e.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCrossConnections` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := e.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + e.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armnetwork.ExpressRouteCrossConnectionsClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + e.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + e.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (e *ExpressRouteCrossConnectionsServerTransport) dispatchBeginListRoutesTable(req *http.Request) (*http.Response, error) { + if e.srv.BeginListRoutesTable == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginListRoutesTable not implemented")} + } + beginListRoutesTable := e.beginListRoutesTable.get(req) + if beginListRoutesTable == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCrossConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/routeTables/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + crossConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("crossConnectionName")]) + if err != nil { + return nil, err + } + peeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peeringName")]) + if err != nil { + return nil, err + } + devicePathParam, err := url.PathUnescape(matches[regex.SubexpIndex("devicePath")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginListRoutesTable(req.Context(), resourceGroupNameParam, crossConnectionNameParam, peeringNameParam, devicePathParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginListRoutesTable = &respr + e.beginListRoutesTable.add(req, beginListRoutesTable) + } + + resp, err := server.PollerResponderNext(beginListRoutesTable, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + e.beginListRoutesTable.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginListRoutesTable) { + e.beginListRoutesTable.remove(req) + } + + return resp, nil +} + +func (e *ExpressRouteCrossConnectionsServerTransport) dispatchBeginListRoutesTableSummary(req *http.Request) (*http.Response, error) { + if e.srv.BeginListRoutesTableSummary == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginListRoutesTableSummary not implemented")} + } + beginListRoutesTableSummary := e.beginListRoutesTableSummary.get(req) + if beginListRoutesTableSummary == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCrossConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/routeTablesSummary/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + crossConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("crossConnectionName")]) + if err != nil { + return nil, err + } + peeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peeringName")]) + if err != nil { + return nil, err + } + devicePathParam, err := url.PathUnescape(matches[regex.SubexpIndex("devicePath")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginListRoutesTableSummary(req.Context(), resourceGroupNameParam, crossConnectionNameParam, peeringNameParam, devicePathParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginListRoutesTableSummary = &respr + e.beginListRoutesTableSummary.add(req, beginListRoutesTableSummary) + } + + resp, err := server.PollerResponderNext(beginListRoutesTableSummary, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + e.beginListRoutesTableSummary.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginListRoutesTableSummary) { + e.beginListRoutesTableSummary.remove(req) + } + + return resp, nil +} + +func (e *ExpressRouteCrossConnectionsServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if e.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCrossConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + crossConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("crossConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.UpdateTags(req.Context(), resourceGroupNameParam, crossConnectionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ExpressRouteCrossConnection, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressroutegateways_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressroutegateways_server.go new file mode 100644 index 00000000000..15c3103da46 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressroutegateways_server.go @@ -0,0 +1,332 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ExpressRouteGatewaysServer is a fake server for instances of the armnetwork.ExpressRouteGatewaysClient type. 
+type ExpressRouteGatewaysServer struct { + // BeginCreateOrUpdate is the fake for method ExpressRouteGatewaysClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, expressRouteGatewayName string, putExpressRouteGatewayParameters armnetwork.ExpressRouteGateway, options *armnetwork.ExpressRouteGatewaysClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.ExpressRouteGatewaysClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method ExpressRouteGatewaysClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, expressRouteGatewayName string, options *armnetwork.ExpressRouteGatewaysClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.ExpressRouteGatewaysClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method ExpressRouteGatewaysClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, expressRouteGatewayName string, options *armnetwork.ExpressRouteGatewaysClientGetOptions) (resp azfake.Responder[armnetwork.ExpressRouteGatewaysClientGetResponse], errResp azfake.ErrorResponder) + + // ListByResourceGroup is the fake for method ExpressRouteGatewaysClient.ListByResourceGroup + // HTTP status codes to indicate success: http.StatusOK + ListByResourceGroup func(ctx context.Context, resourceGroupName string, options *armnetwork.ExpressRouteGatewaysClientListByResourceGroupOptions) (resp azfake.Responder[armnetwork.ExpressRouteGatewaysClientListByResourceGroupResponse], errResp azfake.ErrorResponder) + + // ListBySubscription is the fake for method ExpressRouteGatewaysClient.ListBySubscription + // HTTP status codes to indicate success: http.StatusOK + ListBySubscription func(ctx context.Context, options *armnetwork.ExpressRouteGatewaysClientListBySubscriptionOptions) (resp azfake.Responder[armnetwork.ExpressRouteGatewaysClientListBySubscriptionResponse], errResp azfake.ErrorResponder) + + // BeginUpdateTags is the fake for method ExpressRouteGatewaysClient.BeginUpdateTags + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginUpdateTags func(ctx context.Context, resourceGroupName string, expressRouteGatewayName string, expressRouteGatewayParameters armnetwork.TagsObject, options *armnetwork.ExpressRouteGatewaysClientBeginUpdateTagsOptions) (resp azfake.PollerResponder[armnetwork.ExpressRouteGatewaysClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewExpressRouteGatewaysServerTransport creates a new instance of ExpressRouteGatewaysServerTransport with the provided implementation. +// The returned ExpressRouteGatewaysServerTransport instance is connected to an instance of armnetwork.ExpressRouteGatewaysClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
+func NewExpressRouteGatewaysServerTransport(srv *ExpressRouteGatewaysServer) *ExpressRouteGatewaysServerTransport { + return &ExpressRouteGatewaysServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.ExpressRouteGatewaysClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.ExpressRouteGatewaysClientDeleteResponse]](), + beginUpdateTags: newTracker[azfake.PollerResponder[armnetwork.ExpressRouteGatewaysClientUpdateTagsResponse]](), + } +} + +// ExpressRouteGatewaysServerTransport connects instances of armnetwork.ExpressRouteGatewaysClient to instances of ExpressRouteGatewaysServer. +// Don't use this type directly, use NewExpressRouteGatewaysServerTransport instead. +type ExpressRouteGatewaysServerTransport struct { + srv *ExpressRouteGatewaysServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.ExpressRouteGatewaysClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.ExpressRouteGatewaysClientDeleteResponse]] + beginUpdateTags *tracker[azfake.PollerResponder[armnetwork.ExpressRouteGatewaysClientUpdateTagsResponse]] +} + +// Do implements the policy.Transporter interface for ExpressRouteGatewaysServerTransport. +func (e *ExpressRouteGatewaysServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ExpressRouteGatewaysClient.BeginCreateOrUpdate": + resp, err = e.dispatchBeginCreateOrUpdate(req) + case "ExpressRouteGatewaysClient.BeginDelete": + resp, err = e.dispatchBeginDelete(req) + case "ExpressRouteGatewaysClient.Get": + resp, err = e.dispatchGet(req) + case "ExpressRouteGatewaysClient.ListByResourceGroup": + resp, err = e.dispatchListByResourceGroup(req) + case "ExpressRouteGatewaysClient.ListBySubscription": + resp, err = e.dispatchListBySubscription(req) + case "ExpressRouteGatewaysClient.BeginUpdateTags": + resp, err = e.dispatchBeginUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (e *ExpressRouteGatewaysServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if e.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := e.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.ExpressRouteGateway](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + expressRouteGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("expressRouteGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := 
e.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, expressRouteGatewayNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + e.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + e.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + e.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (e *ExpressRouteGatewaysServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if e.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := e.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + expressRouteGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("expressRouteGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginDelete(req.Context(), resourceGroupNameParam, expressRouteGatewayNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + e.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + e.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + e.beginDelete.remove(req) + } + + return resp, nil +} + +func (e *ExpressRouteGatewaysServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if e.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + expressRouteGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("expressRouteGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.Get(req.Context(), resourceGroupNameParam, expressRouteGatewayNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ExpressRouteGateway, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (e *ExpressRouteGatewaysServerTransport) dispatchListByResourceGroup(req *http.Request) (*http.Response, error) { + if e.srv.ListByResourceGroup == nil { + return nil, &nonRetriableError{errors.New("fake for method ListByResourceGroup not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteGateways` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.ListByResourceGroup(req.Context(), resourceGroupNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ExpressRouteGatewayList, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (e *ExpressRouteGatewaysServerTransport) dispatchListBySubscription(req *http.Request) (*http.Response, error) { + if e.srv.ListBySubscription == nil { + return nil, &nonRetriableError{errors.New("fake for method ListBySubscription not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteGateways` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + respr, errRespr := e.srv.ListBySubscription(req.Context(), nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ExpressRouteGatewayList, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (e *ExpressRouteGatewaysServerTransport) dispatchBeginUpdateTags(req *http.Request) (*http.Response, error) { + if e.srv.BeginUpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdateTags not implemented")} + } + beginUpdateTags := e.beginUpdateTags.get(req) + if beginUpdateTags == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + expressRouteGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("expressRouteGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginUpdateTags(req.Context(), resourceGroupNameParam, expressRouteGatewayNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdateTags = &respr + e.beginUpdateTags.add(req, beginUpdateTags) + } + + resp, err := server.PollerResponderNext(beginUpdateTags, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + e.beginUpdateTags.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdateTags) { + e.beginUpdateTags.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressroutelinks_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressroutelinks_server.go new file mode 100644 index 00000000000..269ffaef85d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressroutelinks_server.go @@ -0,0 +1,156 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ExpressRouteLinksServer is a fake server for instances of the armnetwork.ExpressRouteLinksClient type. +type ExpressRouteLinksServer struct { + // Get is the fake for method ExpressRouteLinksClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, expressRoutePortName string, linkName string, options *armnetwork.ExpressRouteLinksClientGetOptions) (resp azfake.Responder[armnetwork.ExpressRouteLinksClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method ExpressRouteLinksClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, expressRoutePortName string, options *armnetwork.ExpressRouteLinksClientListOptions) (resp azfake.PagerResponder[armnetwork.ExpressRouteLinksClientListResponse]) +} + +// NewExpressRouteLinksServerTransport creates a new instance of ExpressRouteLinksServerTransport with the provided implementation. +// The returned ExpressRouteLinksServerTransport instance is connected to an instance of armnetwork.ExpressRouteLinksClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewExpressRouteLinksServerTransport(srv *ExpressRouteLinksServer) *ExpressRouteLinksServerTransport { + return &ExpressRouteLinksServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.ExpressRouteLinksClientListResponse]](), + } +} + +// ExpressRouteLinksServerTransport connects instances of armnetwork.ExpressRouteLinksClient to instances of ExpressRouteLinksServer. +// Don't use this type directly, use NewExpressRouteLinksServerTransport instead. +type ExpressRouteLinksServerTransport struct { + srv *ExpressRouteLinksServer + newListPager *tracker[azfake.PagerResponder[armnetwork.ExpressRouteLinksClientListResponse]] +} + +// Do implements the policy.Transporter interface for ExpressRouteLinksServerTransport. 
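For orientation, a minimal sketch of how one of these generated fakes is meant to be consumed: the server transport is handed to the real client through the Transport option, so client calls never leave the process. This assumes the azfake.TokenCredential helper from azcore/fake; the subscription, resource group, and link names are placeholders, and the Get handler simply queues a canned 200 response.

package fake_test

import (
	"context"
	"net/http"
	"testing"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func TestExpressRouteLinksGet(t *testing.T) {
	// Only Get is implemented; any other method dispatched to this fake
	// returns the generated "not implemented" error.
	srv := fake.ExpressRouteLinksServer{
		Get: func(ctx context.Context, resourceGroupName, expressRoutePortName, linkName string,
			options *armnetwork.ExpressRouteLinksClientGetOptions) (
			resp azfake.Responder[armnetwork.ExpressRouteLinksClientGetResponse], errResp azfake.ErrorResponder) {
			// Queue a canned 200 response with an empty payload.
			resp.SetResponse(http.StatusOK, armnetwork.ExpressRouteLinksClientGetResponse{}, nil)
			return
		},
	}
	// Route the real client's traffic through the fake server transport.
	client, err := armnetwork.NewExpressRouteLinksClient("subscriptionID", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{
			Transport: fake.NewExpressRouteLinksServerTransport(&srv),
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	if _, err := client.Get(context.Background(), "rg", "port", "link", nil); err != nil {
		t.Fatal(err)
	}
}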
+func (e *ExpressRouteLinksServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ExpressRouteLinksClient.Get": + resp, err = e.dispatchGet(req) + case "ExpressRouteLinksClient.NewListPager": + resp, err = e.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (e *ExpressRouteLinksServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if e.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ExpressRoutePorts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/links/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + expressRoutePortNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("expressRoutePortName")]) + if err != nil { + return nil, err + } + linkNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("linkName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.Get(req.Context(), resourceGroupNameParam, expressRoutePortNameParam, linkNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ExpressRouteLink, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (e *ExpressRouteLinksServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if e.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := e.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ExpressRoutePorts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/links` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + expressRoutePortNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("expressRoutePortName")]) + if err != nil { + return nil, err + } + resp := e.srv.NewListPager(resourceGroupNameParam, expressRoutePortNameParam, nil) + newListPager = &resp + e.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.ExpressRouteLinksClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + e.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + e.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressrouteportauthorizations_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressrouteportauthorizations_server.go new file mode 100644 index 00000000000..0d578804c84 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressrouteportauthorizations_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ExpressRoutePortAuthorizationsServer is a fake server for instances of the armnetwork.ExpressRoutePortAuthorizationsClient type. 
+type ExpressRoutePortAuthorizationsServer struct { + // BeginCreateOrUpdate is the fake for method ExpressRoutePortAuthorizationsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, expressRoutePortName string, authorizationName string, authorizationParameters armnetwork.ExpressRoutePortAuthorization, options *armnetwork.ExpressRoutePortAuthorizationsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.ExpressRoutePortAuthorizationsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method ExpressRoutePortAuthorizationsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, expressRoutePortName string, authorizationName string, options *armnetwork.ExpressRoutePortAuthorizationsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.ExpressRoutePortAuthorizationsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method ExpressRoutePortAuthorizationsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, expressRoutePortName string, authorizationName string, options *armnetwork.ExpressRoutePortAuthorizationsClientGetOptions) (resp azfake.Responder[armnetwork.ExpressRoutePortAuthorizationsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method ExpressRoutePortAuthorizationsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, expressRoutePortName string, options *armnetwork.ExpressRoutePortAuthorizationsClientListOptions) (resp azfake.PagerResponder[armnetwork.ExpressRoutePortAuthorizationsClientListResponse]) +} + +// NewExpressRoutePortAuthorizationsServerTransport creates a new instance of ExpressRoutePortAuthorizationsServerTransport with the provided implementation. +// The returned ExpressRoutePortAuthorizationsServerTransport instance is connected to an instance of armnetwork.ExpressRoutePortAuthorizationsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewExpressRoutePortAuthorizationsServerTransport(srv *ExpressRoutePortAuthorizationsServer) *ExpressRoutePortAuthorizationsServerTransport { + return &ExpressRoutePortAuthorizationsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.ExpressRoutePortAuthorizationsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.ExpressRoutePortAuthorizationsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.ExpressRoutePortAuthorizationsClientListResponse]](), + } +} + +// ExpressRoutePortAuthorizationsServerTransport connects instances of armnetwork.ExpressRoutePortAuthorizationsClient to instances of ExpressRoutePortAuthorizationsServer. +// Don't use this type directly, use NewExpressRoutePortAuthorizationsServerTransport instead. 
+type ExpressRoutePortAuthorizationsServerTransport struct { + srv *ExpressRoutePortAuthorizationsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.ExpressRoutePortAuthorizationsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.ExpressRoutePortAuthorizationsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.ExpressRoutePortAuthorizationsClientListResponse]] +} + +// Do implements the policy.Transporter interface for ExpressRoutePortAuthorizationsServerTransport. +func (e *ExpressRoutePortAuthorizationsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ExpressRoutePortAuthorizationsClient.BeginCreateOrUpdate": + resp, err = e.dispatchBeginCreateOrUpdate(req) + case "ExpressRoutePortAuthorizationsClient.BeginDelete": + resp, err = e.dispatchBeginDelete(req) + case "ExpressRoutePortAuthorizationsClient.Get": + resp, err = e.dispatchGet(req) + case "ExpressRoutePortAuthorizationsClient.NewListPager": + resp, err = e.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (e *ExpressRoutePortAuthorizationsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if e.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := e.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRoutePorts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/authorizations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.ExpressRoutePortAuthorization](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + expressRoutePortNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("expressRoutePortName")]) + if err != nil { + return nil, err + } + authorizationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("authorizationName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, expressRoutePortNameParam, authorizationNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + e.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + e.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + e.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (e *ExpressRoutePortAuthorizationsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if e.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := e.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRoutePorts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/authorizations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + expressRoutePortNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("expressRoutePortName")]) + if err != nil { + return nil, err + } + authorizationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("authorizationName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginDelete(req.Context(), resourceGroupNameParam, expressRoutePortNameParam, authorizationNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + e.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + e.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + e.beginDelete.remove(req) + } + + return resp, nil +} + +func (e *ExpressRoutePortAuthorizationsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if e.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRoutePorts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/authorizations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + expressRoutePortNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("expressRoutePortName")]) + if err != nil { + return nil, err + } + authorizationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("authorizationName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.Get(req.Context(), resourceGroupNameParam, expressRoutePortNameParam, authorizationNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ExpressRoutePortAuthorization, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (e *ExpressRoutePortAuthorizationsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if e.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := e.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRoutePorts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/authorizations` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + expressRoutePortNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("expressRoutePortName")]) + if err != nil { + return nil, err + } + resp := e.srv.NewListPager(resourceGroupNameParam, expressRoutePortNameParam, nil) + newListPager = &resp + e.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.ExpressRoutePortAuthorizationsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + e.newListPager.remove(req) + return 
nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + e.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressrouteports_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressrouteports_server.go new file mode 100644 index 00000000000..7c3f6a28166 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressrouteports_server.go @@ -0,0 +1,383 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ExpressRoutePortsServer is a fake server for instances of the armnetwork.ExpressRoutePortsClient type. +type ExpressRoutePortsServer struct { + // BeginCreateOrUpdate is the fake for method ExpressRoutePortsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, expressRoutePortName string, parameters armnetwork.ExpressRoutePort, options *armnetwork.ExpressRoutePortsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.ExpressRoutePortsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method ExpressRoutePortsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, expressRoutePortName string, options *armnetwork.ExpressRoutePortsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.ExpressRoutePortsClientDeleteResponse], errResp azfake.ErrorResponder) + + // GenerateLOA is the fake for method ExpressRoutePortsClient.GenerateLOA + // HTTP status codes to indicate success: http.StatusOK + GenerateLOA func(ctx context.Context, resourceGroupName string, expressRoutePortName string, request armnetwork.GenerateExpressRoutePortsLOARequest, options *armnetwork.ExpressRoutePortsClientGenerateLOAOptions) (resp azfake.Responder[armnetwork.ExpressRoutePortsClientGenerateLOAResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method ExpressRoutePortsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, expressRoutePortName string, options *armnetwork.ExpressRoutePortsClientGetOptions) (resp azfake.Responder[armnetwork.ExpressRoutePortsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method ExpressRoutePortsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options 
*armnetwork.ExpressRoutePortsClientListOptions) (resp azfake.PagerResponder[armnetwork.ExpressRoutePortsClientListResponse]) + + // NewListByResourceGroupPager is the fake for method ExpressRoutePortsClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armnetwork.ExpressRoutePortsClientListByResourceGroupOptions) (resp azfake.PagerResponder[armnetwork.ExpressRoutePortsClientListByResourceGroupResponse]) + + // UpdateTags is the fake for method ExpressRoutePortsClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, expressRoutePortName string, parameters armnetwork.TagsObject, options *armnetwork.ExpressRoutePortsClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.ExpressRoutePortsClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewExpressRoutePortsServerTransport creates a new instance of ExpressRoutePortsServerTransport with the provided implementation. +// The returned ExpressRoutePortsServerTransport instance is connected to an instance of armnetwork.ExpressRoutePortsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewExpressRoutePortsServerTransport(srv *ExpressRoutePortsServer) *ExpressRoutePortsServerTransport { + return &ExpressRoutePortsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.ExpressRoutePortsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.ExpressRoutePortsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.ExpressRoutePortsClientListResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armnetwork.ExpressRoutePortsClientListByResourceGroupResponse]](), + } +} + +// ExpressRoutePortsServerTransport connects instances of armnetwork.ExpressRoutePortsClient to instances of ExpressRoutePortsServer. +// Don't use this type directly, use NewExpressRoutePortsServerTransport instead. +type ExpressRoutePortsServerTransport struct { + srv *ExpressRoutePortsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.ExpressRoutePortsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.ExpressRoutePortsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.ExpressRoutePortsClientListResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armnetwork.ExpressRoutePortsClientListByResourceGroupResponse]] +} + +// Do implements the policy.Transporter interface for ExpressRoutePortsServerTransport. 
+func (e *ExpressRoutePortsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ExpressRoutePortsClient.BeginCreateOrUpdate": + resp, err = e.dispatchBeginCreateOrUpdate(req) + case "ExpressRoutePortsClient.BeginDelete": + resp, err = e.dispatchBeginDelete(req) + case "ExpressRoutePortsClient.GenerateLOA": + resp, err = e.dispatchGenerateLOA(req) + case "ExpressRoutePortsClient.Get": + resp, err = e.dispatchGet(req) + case "ExpressRoutePortsClient.NewListPager": + resp, err = e.dispatchNewListPager(req) + case "ExpressRoutePortsClient.NewListByResourceGroupPager": + resp, err = e.dispatchNewListByResourceGroupPager(req) + case "ExpressRoutePortsClient.UpdateTags": + resp, err = e.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (e *ExpressRoutePortsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if e.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := e.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ExpressRoutePorts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.ExpressRoutePort](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + expressRoutePortNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("expressRoutePortName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, expressRoutePortNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + e.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + e.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + e.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (e *ExpressRoutePortsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if e.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := e.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ExpressRoutePorts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + expressRoutePortNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("expressRoutePortName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.BeginDelete(req.Context(), resourceGroupNameParam, expressRoutePortNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + e.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + e.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + e.beginDelete.remove(req) + } + + return resp, nil +} + +func (e *ExpressRoutePortsServerTransport) dispatchGenerateLOA(req *http.Request) (*http.Response, error) { + if e.srv.GenerateLOA == nil { + return nil, &nonRetriableError{errors.New("fake for method GenerateLOA not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRoutePorts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/generateLoa` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.GenerateExpressRoutePortsLOARequest](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + expressRoutePortNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("expressRoutePortName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.GenerateLOA(req.Context(), resourceGroupNameParam, expressRoutePortNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).GenerateExpressRoutePortsLOAResult, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (e *ExpressRoutePortsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if e.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ExpressRoutePorts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + expressRoutePortNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("expressRoutePortName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.Get(req.Context(), resourceGroupNameParam, expressRoutePortNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ExpressRoutePort, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (e *ExpressRoutePortsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if e.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := e.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ExpressRoutePorts` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := e.srv.NewListPager(nil) + newListPager = &resp + e.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.ExpressRoutePortsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + e.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + e.newListPager.remove(req) + } + return resp, nil +} + +func (e *ExpressRoutePortsServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if e.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := e.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ExpressRoutePorts` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := e.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + e.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armnetwork.ExpressRoutePortsClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + e.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + e.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (e *ExpressRoutePortsServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if e.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ExpressRoutePorts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + expressRoutePortNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("expressRoutePortName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.UpdateTags(req.Context(), resourceGroupNameParam, expressRoutePortNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ExpressRoutePort, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressrouteportslocations_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressrouteportslocations_server.go new file mode 100644 index 00000000000..0ee66114198 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressrouteportslocations_server.go @@ -0,0 +1,140 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ExpressRoutePortsLocationsServer is a fake server for instances of the armnetwork.ExpressRoutePortsLocationsClient type. +type ExpressRoutePortsLocationsServer struct { + // Get is the fake for method ExpressRoutePortsLocationsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, locationName string, options *armnetwork.ExpressRoutePortsLocationsClientGetOptions) (resp azfake.Responder[armnetwork.ExpressRoutePortsLocationsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method ExpressRoutePortsLocationsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armnetwork.ExpressRoutePortsLocationsClientListOptions) (resp azfake.PagerResponder[armnetwork.ExpressRoutePortsLocationsClientListResponse]) +} + +// NewExpressRoutePortsLocationsServerTransport creates a new instance of ExpressRoutePortsLocationsServerTransport with the provided implementation. +// The returned ExpressRoutePortsLocationsServerTransport instance is connected to an instance of armnetwork.ExpressRoutePortsLocationsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewExpressRoutePortsLocationsServerTransport(srv *ExpressRoutePortsLocationsServer) *ExpressRoutePortsLocationsServerTransport { + return &ExpressRoutePortsLocationsServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.ExpressRoutePortsLocationsClientListResponse]](), + } +} + +// ExpressRoutePortsLocationsServerTransport connects instances of armnetwork.ExpressRoutePortsLocationsClient to instances of ExpressRoutePortsLocationsServer. +// Don't use this type directly, use NewExpressRoutePortsLocationsServerTransport instead. 
+type ExpressRoutePortsLocationsServerTransport struct { + srv *ExpressRoutePortsLocationsServer + newListPager *tracker[azfake.PagerResponder[armnetwork.ExpressRoutePortsLocationsClientListResponse]] +} + +// Do implements the policy.Transporter interface for ExpressRoutePortsLocationsServerTransport. +func (e *ExpressRoutePortsLocationsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ExpressRoutePortsLocationsClient.Get": + resp, err = e.dispatchGet(req) + case "ExpressRoutePortsLocationsClient.NewListPager": + resp, err = e.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (e *ExpressRoutePortsLocationsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if e.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ExpressRoutePortsLocations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("locationName")]) + if err != nil { + return nil, err + } + respr, errRespr := e.srv.Get(req.Context(), locationNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ExpressRoutePortsLocation, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (e *ExpressRoutePortsLocationsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if e.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := e.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ExpressRoutePortsLocations` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := e.srv.NewListPager(nil) + newListPager = &resp + e.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.ExpressRoutePortsLocationsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + e.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + e.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressrouteproviderportslocation_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressrouteproviderportslocation_server.go new file mode 100644 index 00000000000..82545cdb940 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressrouteproviderportslocation_server.go @@ -0,0 +1,104 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ExpressRouteProviderPortsLocationServer is a fake server for instances of the armnetwork.ExpressRouteProviderPortsLocationClient type. +type ExpressRouteProviderPortsLocationServer struct { + // List is the fake for method ExpressRouteProviderPortsLocationClient.List + // HTTP status codes to indicate success: http.StatusOK + List func(ctx context.Context, options *armnetwork.ExpressRouteProviderPortsLocationClientListOptions) (resp azfake.Responder[armnetwork.ExpressRouteProviderPortsLocationClientListResponse], errResp azfake.ErrorResponder) +} + +// NewExpressRouteProviderPortsLocationServerTransport creates a new instance of ExpressRouteProviderPortsLocationServerTransport with the provided implementation. +// The returned ExpressRouteProviderPortsLocationServerTransport instance is connected to an instance of armnetwork.ExpressRouteProviderPortsLocationClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewExpressRouteProviderPortsLocationServerTransport(srv *ExpressRouteProviderPortsLocationServer) *ExpressRouteProviderPortsLocationServerTransport { + return &ExpressRouteProviderPortsLocationServerTransport{srv: srv} +} + +// ExpressRouteProviderPortsLocationServerTransport connects instances of armnetwork.ExpressRouteProviderPortsLocationClient to instances of ExpressRouteProviderPortsLocationServer. +// Don't use this type directly, use NewExpressRouteProviderPortsLocationServerTransport instead. +type ExpressRouteProviderPortsLocationServerTransport struct { + srv *ExpressRouteProviderPortsLocationServer +} + +// Do implements the policy.Transporter interface for ExpressRouteProviderPortsLocationServerTransport. 
+func (e *ExpressRouteProviderPortsLocationServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ExpressRouteProviderPortsLocationClient.List": + resp, err = e.dispatchList(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (e *ExpressRouteProviderPortsLocationServerTransport) dispatchList(req *http.Request) (*http.Response, error) { + if e.srv.List == nil { + return nil, &nonRetriableError{errors.New("fake for method List not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteProviderPorts` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + filterUnescaped, err := url.QueryUnescape(qp.Get("$filter")) + if err != nil { + return nil, err + } + filterParam := getOptional(filterUnescaped) + var options *armnetwork.ExpressRouteProviderPortsLocationClientListOptions + if filterParam != nil { + options = &armnetwork.ExpressRouteProviderPortsLocationClientListOptions{ + Filter: filterParam, + } + } + respr, errRespr := e.srv.List(req.Context(), options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ExpressRouteProviderPortListResult, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressrouteserviceproviders_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressrouteserviceproviders_server.go new file mode 100644 index 00000000000..c62b406c440 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/expressrouteserviceproviders_server.go @@ -0,0 +1,103 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "regexp" +) + +// ExpressRouteServiceProvidersServer is a fake server for instances of the armnetwork.ExpressRouteServiceProvidersClient type. 
+type ExpressRouteServiceProvidersServer struct { + // NewListPager is the fake for method ExpressRouteServiceProvidersClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armnetwork.ExpressRouteServiceProvidersClientListOptions) (resp azfake.PagerResponder[armnetwork.ExpressRouteServiceProvidersClientListResponse]) +} + +// NewExpressRouteServiceProvidersServerTransport creates a new instance of ExpressRouteServiceProvidersServerTransport with the provided implementation. +// The returned ExpressRouteServiceProvidersServerTransport instance is connected to an instance of armnetwork.ExpressRouteServiceProvidersClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewExpressRouteServiceProvidersServerTransport(srv *ExpressRouteServiceProvidersServer) *ExpressRouteServiceProvidersServerTransport { + return &ExpressRouteServiceProvidersServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.ExpressRouteServiceProvidersClientListResponse]](), + } +} + +// ExpressRouteServiceProvidersServerTransport connects instances of armnetwork.ExpressRouteServiceProvidersClient to instances of ExpressRouteServiceProvidersServer. +// Don't use this type directly, use NewExpressRouteServiceProvidersServerTransport instead. +type ExpressRouteServiceProvidersServerTransport struct { + srv *ExpressRouteServiceProvidersServer + newListPager *tracker[azfake.PagerResponder[armnetwork.ExpressRouteServiceProvidersClientListResponse]] +} + +// Do implements the policy.Transporter interface for ExpressRouteServiceProvidersServerTransport. +func (e *ExpressRouteServiceProvidersServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ExpressRouteServiceProvidersClient.NewListPager": + resp, err = e.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (e *ExpressRouteServiceProvidersServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if e.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := e.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteServiceProviders` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := e.srv.NewListPager(nil) + newListPager = &resp + e.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.ExpressRouteServiceProvidersClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + e.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + e.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/firewallpolicies_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/firewallpolicies_server.go new file mode 100644 index 00000000000..f6d670c465e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/firewallpolicies_server.go @@ -0,0 +1,352 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// FirewallPoliciesServer is a fake server for instances of the armnetwork.FirewallPoliciesClient type. +type FirewallPoliciesServer struct { + // BeginCreateOrUpdate is the fake for method FirewallPoliciesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, firewallPolicyName string, parameters armnetwork.FirewallPolicy, options *armnetwork.FirewallPoliciesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.FirewallPoliciesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method FirewallPoliciesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, firewallPolicyName string, options *armnetwork.FirewallPoliciesClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.FirewallPoliciesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method FirewallPoliciesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, firewallPolicyName string, options *armnetwork.FirewallPoliciesClientGetOptions) (resp azfake.Responder[armnetwork.FirewallPoliciesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method FirewallPoliciesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armnetwork.FirewallPoliciesClientListOptions) (resp azfake.PagerResponder[armnetwork.FirewallPoliciesClientListResponse]) + + // NewListAllPager is the fake for method FirewallPoliciesClient.NewListAllPager + // HTTP status codes to indicate success: http.StatusOK + NewListAllPager func(options *armnetwork.FirewallPoliciesClientListAllOptions) (resp azfake.PagerResponder[armnetwork.FirewallPoliciesClientListAllResponse]) + + // UpdateTags is the fake for method FirewallPoliciesClient.UpdateTags + // HTTP status codes to indicate 
success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, firewallPolicyName string, parameters armnetwork.TagsObject, options *armnetwork.FirewallPoliciesClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.FirewallPoliciesClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewFirewallPoliciesServerTransport creates a new instance of FirewallPoliciesServerTransport with the provided implementation. +// The returned FirewallPoliciesServerTransport instance is connected to an instance of armnetwork.FirewallPoliciesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewFirewallPoliciesServerTransport(srv *FirewallPoliciesServer) *FirewallPoliciesServerTransport { + return &FirewallPoliciesServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.FirewallPoliciesClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.FirewallPoliciesClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.FirewallPoliciesClientListResponse]](), + newListAllPager: newTracker[azfake.PagerResponder[armnetwork.FirewallPoliciesClientListAllResponse]](), + } +} + +// FirewallPoliciesServerTransport connects instances of armnetwork.FirewallPoliciesClient to instances of FirewallPoliciesServer. +// Don't use this type directly, use NewFirewallPoliciesServerTransport instead. +type FirewallPoliciesServerTransport struct { + srv *FirewallPoliciesServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.FirewallPoliciesClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.FirewallPoliciesClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.FirewallPoliciesClientListResponse]] + newListAllPager *tracker[azfake.PagerResponder[armnetwork.FirewallPoliciesClientListAllResponse]] +} + +// Do implements the policy.Transporter interface for FirewallPoliciesServerTransport. 
+func (f *FirewallPoliciesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "FirewallPoliciesClient.BeginCreateOrUpdate": + resp, err = f.dispatchBeginCreateOrUpdate(req) + case "FirewallPoliciesClient.BeginDelete": + resp, err = f.dispatchBeginDelete(req) + case "FirewallPoliciesClient.Get": + resp, err = f.dispatchGet(req) + case "FirewallPoliciesClient.NewListPager": + resp, err = f.dispatchNewListPager(req) + case "FirewallPoliciesClient.NewListAllPager": + resp, err = f.dispatchNewListAllPager(req) + case "FirewallPoliciesClient.UpdateTags": + resp, err = f.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (f *FirewallPoliciesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if f.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := f.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/firewallPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.FirewallPolicy](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + firewallPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("firewallPolicyName")]) + if err != nil { + return nil, err + } + respr, errRespr := f.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, firewallPolicyNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + f.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + f.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + f.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (f *FirewallPoliciesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if f.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := f.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/firewallPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + firewallPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("firewallPolicyName")]) + if err != nil { + return nil, err + } + respr, errRespr := f.srv.BeginDelete(req.Context(), resourceGroupNameParam, firewallPolicyNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + f.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + f.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + f.beginDelete.remove(req) + } + + return resp, nil +} + +func (f *FirewallPoliciesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if f.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/firewallPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + firewallPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("firewallPolicyName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.FirewallPoliciesClientGetOptions + if expandParam != nil { + options = &armnetwork.FirewallPoliciesClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := f.srv.Get(req.Context(), resourceGroupNameParam, firewallPolicyNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, 
&nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).FirewallPolicy, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (f *FirewallPoliciesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if f.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := f.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/firewallPolicies` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := f.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + f.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.FirewallPoliciesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + f.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + f.newListPager.remove(req) + } + return resp, nil +} + +func (f *FirewallPoliciesServerTransport) dispatchNewListAllPager(req *http.Request) (*http.Response, error) { + if f.srv.NewListAllPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAllPager not implemented")} + } + newListAllPager := f.newListAllPager.get(req) + if newListAllPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/firewallPolicies` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := f.srv.NewListAllPager(nil) + newListAllPager = &resp + f.newListAllPager.add(req, newListAllPager) + server.PagerResponderInjectNextLinks(newListAllPager, req, func(page *armnetwork.FirewallPoliciesClientListAllResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAllPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + f.newListAllPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAllPager) { + f.newListAllPager.remove(req) + } + return resp, nil +} + +func (f *FirewallPoliciesServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if f.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/firewallPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + firewallPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("firewallPolicyName")]) + if err != nil { + return nil, err + } + respr, errRespr := f.srv.UpdateTags(req.Context(), resourceGroupNameParam, firewallPolicyNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).FirewallPolicy, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/firewallpolicyidpssignatures_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/firewallpolicyidpssignatures_server.go new file mode 100644 index 00000000000..924ca9ccf06 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/firewallpolicyidpssignatures_server.go @@ -0,0 +1,104 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// FirewallPolicyIdpsSignaturesServer is a fake server for instances of the armnetwork.FirewallPolicyIdpsSignaturesClient type. 
+type FirewallPolicyIdpsSignaturesServer struct { + // List is the fake for method FirewallPolicyIdpsSignaturesClient.List + // HTTP status codes to indicate success: http.StatusOK + List func(ctx context.Context, resourceGroupName string, firewallPolicyName string, parameters armnetwork.IDPSQueryObject, options *armnetwork.FirewallPolicyIdpsSignaturesClientListOptions) (resp azfake.Responder[armnetwork.FirewallPolicyIdpsSignaturesClientListResponse], errResp azfake.ErrorResponder) +} + +// NewFirewallPolicyIdpsSignaturesServerTransport creates a new instance of FirewallPolicyIdpsSignaturesServerTransport with the provided implementation. +// The returned FirewallPolicyIdpsSignaturesServerTransport instance is connected to an instance of armnetwork.FirewallPolicyIdpsSignaturesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewFirewallPolicyIdpsSignaturesServerTransport(srv *FirewallPolicyIdpsSignaturesServer) *FirewallPolicyIdpsSignaturesServerTransport { + return &FirewallPolicyIdpsSignaturesServerTransport{srv: srv} +} + +// FirewallPolicyIdpsSignaturesServerTransport connects instances of armnetwork.FirewallPolicyIdpsSignaturesClient to instances of FirewallPolicyIdpsSignaturesServer. +// Don't use this type directly, use NewFirewallPolicyIdpsSignaturesServerTransport instead. +type FirewallPolicyIdpsSignaturesServerTransport struct { + srv *FirewallPolicyIdpsSignaturesServer +} + +// Do implements the policy.Transporter interface for FirewallPolicyIdpsSignaturesServerTransport. +func (f *FirewallPolicyIdpsSignaturesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "FirewallPolicyIdpsSignaturesClient.List": + resp, err = f.dispatchList(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (f *FirewallPolicyIdpsSignaturesServerTransport) dispatchList(req *http.Request) (*http.Response, error) { + if f.srv.List == nil { + return nil, &nonRetriableError{errors.New("fake for method List not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/firewallPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/listIdpsSignatures` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.IDPSQueryObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + firewallPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("firewallPolicyName")]) + if err != nil { + return nil, err + } + respr, errRespr := f.srv.List(req.Context(), resourceGroupNameParam, firewallPolicyNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, 
&nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).QueryResults, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/firewallpolicyidpssignaturesfiltervalues_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/firewallpolicyidpssignaturesfiltervalues_server.go new file mode 100644 index 00000000000..070a65f0f62 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/firewallpolicyidpssignaturesfiltervalues_server.go @@ -0,0 +1,104 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// FirewallPolicyIdpsSignaturesFilterValuesServer is a fake server for instances of the armnetwork.FirewallPolicyIdpsSignaturesFilterValuesClient type. +type FirewallPolicyIdpsSignaturesFilterValuesServer struct { + // List is the fake for method FirewallPolicyIdpsSignaturesFilterValuesClient.List + // HTTP status codes to indicate success: http.StatusOK + List func(ctx context.Context, resourceGroupName string, firewallPolicyName string, parameters armnetwork.SignatureOverridesFilterValuesQuery, options *armnetwork.FirewallPolicyIdpsSignaturesFilterValuesClientListOptions) (resp azfake.Responder[armnetwork.FirewallPolicyIdpsSignaturesFilterValuesClientListResponse], errResp azfake.ErrorResponder) +} + +// NewFirewallPolicyIdpsSignaturesFilterValuesServerTransport creates a new instance of FirewallPolicyIdpsSignaturesFilterValuesServerTransport with the provided implementation. +// The returned FirewallPolicyIdpsSignaturesFilterValuesServerTransport instance is connected to an instance of armnetwork.FirewallPolicyIdpsSignaturesFilterValuesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewFirewallPolicyIdpsSignaturesFilterValuesServerTransport(srv *FirewallPolicyIdpsSignaturesFilterValuesServer) *FirewallPolicyIdpsSignaturesFilterValuesServerTransport { + return &FirewallPolicyIdpsSignaturesFilterValuesServerTransport{srv: srv} +} + +// FirewallPolicyIdpsSignaturesFilterValuesServerTransport connects instances of armnetwork.FirewallPolicyIdpsSignaturesFilterValuesClient to instances of FirewallPolicyIdpsSignaturesFilterValuesServer. +// Don't use this type directly, use NewFirewallPolicyIdpsSignaturesFilterValuesServerTransport instead. +type FirewallPolicyIdpsSignaturesFilterValuesServerTransport struct { + srv *FirewallPolicyIdpsSignaturesFilterValuesServer +} + +// Do implements the policy.Transporter interface for FirewallPolicyIdpsSignaturesFilterValuesServerTransport. 
+func (f *FirewallPolicyIdpsSignaturesFilterValuesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "FirewallPolicyIdpsSignaturesFilterValuesClient.List": + resp, err = f.dispatchList(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (f *FirewallPolicyIdpsSignaturesFilterValuesServerTransport) dispatchList(req *http.Request) (*http.Response, error) { + if f.srv.List == nil { + return nil, &nonRetriableError{errors.New("fake for method List not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/firewallPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/listIdpsFilterOptions` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.SignatureOverridesFilterValuesQuery](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + firewallPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("firewallPolicyName")]) + if err != nil { + return nil, err + } + respr, errRespr := f.srv.List(req.Context(), resourceGroupNameParam, firewallPolicyNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).SignatureOverridesFilterValuesResponse, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/firewallpolicyidpssignaturesoverrides_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/firewallpolicyidpssignaturesoverrides_server.go new file mode 100644 index 00000000000..98ff6f46c5a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/firewallpolicyidpssignaturesoverrides_server.go @@ -0,0 +1,225 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// FirewallPolicyIdpsSignaturesOverridesServer is a fake server for instances of the armnetwork.FirewallPolicyIdpsSignaturesOverridesClient type. +type FirewallPolicyIdpsSignaturesOverridesServer struct { + // Get is the fake for method FirewallPolicyIdpsSignaturesOverridesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, firewallPolicyName string, options *armnetwork.FirewallPolicyIdpsSignaturesOverridesClientGetOptions) (resp azfake.Responder[armnetwork.FirewallPolicyIdpsSignaturesOverridesClientGetResponse], errResp azfake.ErrorResponder) + + // List is the fake for method FirewallPolicyIdpsSignaturesOverridesClient.List + // HTTP status codes to indicate success: http.StatusOK + List func(ctx context.Context, resourceGroupName string, firewallPolicyName string, options *armnetwork.FirewallPolicyIdpsSignaturesOverridesClientListOptions) (resp azfake.Responder[armnetwork.FirewallPolicyIdpsSignaturesOverridesClientListResponse], errResp azfake.ErrorResponder) + + // Patch is the fake for method FirewallPolicyIdpsSignaturesOverridesClient.Patch + // HTTP status codes to indicate success: http.StatusOK + Patch func(ctx context.Context, resourceGroupName string, firewallPolicyName string, parameters armnetwork.SignaturesOverrides, options *armnetwork.FirewallPolicyIdpsSignaturesOverridesClientPatchOptions) (resp azfake.Responder[armnetwork.FirewallPolicyIdpsSignaturesOverridesClientPatchResponse], errResp azfake.ErrorResponder) + + // Put is the fake for method FirewallPolicyIdpsSignaturesOverridesClient.Put + // HTTP status codes to indicate success: http.StatusOK + Put func(ctx context.Context, resourceGroupName string, firewallPolicyName string, parameters armnetwork.SignaturesOverrides, options *armnetwork.FirewallPolicyIdpsSignaturesOverridesClientPutOptions) (resp azfake.Responder[armnetwork.FirewallPolicyIdpsSignaturesOverridesClientPutResponse], errResp azfake.ErrorResponder) +} + +// NewFirewallPolicyIdpsSignaturesOverridesServerTransport creates a new instance of FirewallPolicyIdpsSignaturesOverridesServerTransport with the provided implementation. +// The returned FirewallPolicyIdpsSignaturesOverridesServerTransport instance is connected to an instance of armnetwork.FirewallPolicyIdpsSignaturesOverridesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewFirewallPolicyIdpsSignaturesOverridesServerTransport(srv *FirewallPolicyIdpsSignaturesOverridesServer) *FirewallPolicyIdpsSignaturesOverridesServerTransport { + return &FirewallPolicyIdpsSignaturesOverridesServerTransport{srv: srv} +} + +// FirewallPolicyIdpsSignaturesOverridesServerTransport connects instances of armnetwork.FirewallPolicyIdpsSignaturesOverridesClient to instances of FirewallPolicyIdpsSignaturesOverridesServer. +// Don't use this type directly, use NewFirewallPolicyIdpsSignaturesOverridesServerTransport instead. 
+type FirewallPolicyIdpsSignaturesOverridesServerTransport struct { + srv *FirewallPolicyIdpsSignaturesOverridesServer +} + +// Do implements the policy.Transporter interface for FirewallPolicyIdpsSignaturesOverridesServerTransport. +func (f *FirewallPolicyIdpsSignaturesOverridesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "FirewallPolicyIdpsSignaturesOverridesClient.Get": + resp, err = f.dispatchGet(req) + case "FirewallPolicyIdpsSignaturesOverridesClient.List": + resp, err = f.dispatchList(req) + case "FirewallPolicyIdpsSignaturesOverridesClient.Patch": + resp, err = f.dispatchPatch(req) + case "FirewallPolicyIdpsSignaturesOverridesClient.Put": + resp, err = f.dispatchPut(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (f *FirewallPolicyIdpsSignaturesOverridesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if f.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/firewallPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/signatureOverrides/default` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + firewallPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("firewallPolicyName")]) + if err != nil { + return nil, err + } + respr, errRespr := f.srv.Get(req.Context(), resourceGroupNameParam, firewallPolicyNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).SignaturesOverrides, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (f *FirewallPolicyIdpsSignaturesOverridesServerTransport) dispatchList(req *http.Request) (*http.Response, error) { + if f.srv.List == nil { + return nil, &nonRetriableError{errors.New("fake for method List not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/firewallPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/signatureOverrides` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + firewallPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("firewallPolicyName")]) + if err != nil { + return nil, err + } + respr, errRespr := f.srv.List(req.Context(), resourceGroupNameParam, firewallPolicyNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).SignaturesOverridesList, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (f *FirewallPolicyIdpsSignaturesOverridesServerTransport) dispatchPatch(req *http.Request) (*http.Response, error) { + if f.srv.Patch == nil { + return nil, &nonRetriableError{errors.New("fake for method Patch not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/firewallPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/signatureOverrides/default` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.SignaturesOverrides](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + firewallPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("firewallPolicyName")]) + if err != nil { + return nil, err + } + respr, errRespr := f.srv.Patch(req.Context(), resourceGroupNameParam, firewallPolicyNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).SignaturesOverrides, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (f *FirewallPolicyIdpsSignaturesOverridesServerTransport) dispatchPut(req *http.Request) (*http.Response, error) { + if f.srv.Put == nil { + return nil, &nonRetriableError{errors.New("fake for method Put not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/firewallPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/signatureOverrides/default` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.SignaturesOverrides](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + firewallPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("firewallPolicyName")]) + if err != nil { + return nil, err + } + respr, errRespr := f.srv.Put(req.Context(), resourceGroupNameParam, firewallPolicyNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).SignaturesOverrides, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/firewallpolicyrulecollectiongroups_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/firewallpolicyrulecollectiongroups_server.go new file mode 100644 index 00000000000..bd2d10733a9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/firewallpolicyrulecollectiongroups_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// FirewallPolicyRuleCollectionGroupsServer is a fake server for instances of the armnetwork.FirewallPolicyRuleCollectionGroupsClient type. 
+type FirewallPolicyRuleCollectionGroupsServer struct { + // BeginCreateOrUpdate is the fake for method FirewallPolicyRuleCollectionGroupsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, firewallPolicyName string, ruleCollectionGroupName string, parameters armnetwork.FirewallPolicyRuleCollectionGroup, options *armnetwork.FirewallPolicyRuleCollectionGroupsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.FirewallPolicyRuleCollectionGroupsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method FirewallPolicyRuleCollectionGroupsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, firewallPolicyName string, ruleCollectionGroupName string, options *armnetwork.FirewallPolicyRuleCollectionGroupsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.FirewallPolicyRuleCollectionGroupsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method FirewallPolicyRuleCollectionGroupsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, firewallPolicyName string, ruleCollectionGroupName string, options *armnetwork.FirewallPolicyRuleCollectionGroupsClientGetOptions) (resp azfake.Responder[armnetwork.FirewallPolicyRuleCollectionGroupsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method FirewallPolicyRuleCollectionGroupsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, firewallPolicyName string, options *armnetwork.FirewallPolicyRuleCollectionGroupsClientListOptions) (resp azfake.PagerResponder[armnetwork.FirewallPolicyRuleCollectionGroupsClientListResponse]) +} + +// NewFirewallPolicyRuleCollectionGroupsServerTransport creates a new instance of FirewallPolicyRuleCollectionGroupsServerTransport with the provided implementation. +// The returned FirewallPolicyRuleCollectionGroupsServerTransport instance is connected to an instance of armnetwork.FirewallPolicyRuleCollectionGroupsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewFirewallPolicyRuleCollectionGroupsServerTransport(srv *FirewallPolicyRuleCollectionGroupsServer) *FirewallPolicyRuleCollectionGroupsServerTransport { + return &FirewallPolicyRuleCollectionGroupsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.FirewallPolicyRuleCollectionGroupsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.FirewallPolicyRuleCollectionGroupsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.FirewallPolicyRuleCollectionGroupsClientListResponse]](), + } +} + +// FirewallPolicyRuleCollectionGroupsServerTransport connects instances of armnetwork.FirewallPolicyRuleCollectionGroupsClient to instances of FirewallPolicyRuleCollectionGroupsServer. +// Don't use this type directly, use NewFirewallPolicyRuleCollectionGroupsServerTransport instead. 
+type FirewallPolicyRuleCollectionGroupsServerTransport struct { + srv *FirewallPolicyRuleCollectionGroupsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.FirewallPolicyRuleCollectionGroupsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.FirewallPolicyRuleCollectionGroupsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.FirewallPolicyRuleCollectionGroupsClientListResponse]] +} + +// Do implements the policy.Transporter interface for FirewallPolicyRuleCollectionGroupsServerTransport. +func (f *FirewallPolicyRuleCollectionGroupsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "FirewallPolicyRuleCollectionGroupsClient.BeginCreateOrUpdate": + resp, err = f.dispatchBeginCreateOrUpdate(req) + case "FirewallPolicyRuleCollectionGroupsClient.BeginDelete": + resp, err = f.dispatchBeginDelete(req) + case "FirewallPolicyRuleCollectionGroupsClient.Get": + resp, err = f.dispatchGet(req) + case "FirewallPolicyRuleCollectionGroupsClient.NewListPager": + resp, err = f.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (f *FirewallPolicyRuleCollectionGroupsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if f.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := f.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/firewallPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ruleCollectionGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.FirewallPolicyRuleCollectionGroup](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + firewallPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("firewallPolicyName")]) + if err != nil { + return nil, err + } + ruleCollectionGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ruleCollectionGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := f.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, firewallPolicyNameParam, ruleCollectionGroupNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + f.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + f.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + f.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (f *FirewallPolicyRuleCollectionGroupsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if f.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := f.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/firewallPolicies/(?P<firewallPolicyName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ruleCollectionGroups/(?P<ruleCollectionGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + firewallPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("firewallPolicyName")]) + if err != nil { + return nil, err + } + ruleCollectionGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ruleCollectionGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := f.srv.BeginDelete(req.Context(), resourceGroupNameParam, firewallPolicyNameParam, ruleCollectionGroupNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + f.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + f.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + f.beginDelete.remove(req) + } + + return resp, nil +} + +func (f *FirewallPolicyRuleCollectionGroupsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if f.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/firewallPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ruleCollectionGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + firewallPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("firewallPolicyName")]) + if err != nil { + return nil, err + } + ruleCollectionGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ruleCollectionGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := f.srv.Get(req.Context(), resourceGroupNameParam, firewallPolicyNameParam, ruleCollectionGroupNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).FirewallPolicyRuleCollectionGroup, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (f *FirewallPolicyRuleCollectionGroupsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if f.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := f.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/firewallPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ruleCollectionGroups` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + firewallPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("firewallPolicyName")]) + if err != nil { + return nil, err + } + resp := f.srv.NewListPager(resourceGroupNameParam, firewallPolicyNameParam, nil) + newListPager = &resp + f.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.FirewallPolicyRuleCollectionGroupsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + 
f.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + f.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/flowlogs_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/flowlogs_server.go new file mode 100644 index 00000000000..b466a7ea9a7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/flowlogs_server.go @@ -0,0 +1,319 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// FlowLogsServer is a fake server for instances of the armnetwork.FlowLogsClient type. +type FlowLogsServer struct { + // BeginCreateOrUpdate is the fake for method FlowLogsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, networkWatcherName string, flowLogName string, parameters armnetwork.FlowLog, options *armnetwork.FlowLogsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.FlowLogsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method FlowLogsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, networkWatcherName string, flowLogName string, options *armnetwork.FlowLogsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.FlowLogsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method FlowLogsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, networkWatcherName string, flowLogName string, options *armnetwork.FlowLogsClientGetOptions) (resp azfake.Responder[armnetwork.FlowLogsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method FlowLogsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, networkWatcherName string, options *armnetwork.FlowLogsClientListOptions) (resp azfake.PagerResponder[armnetwork.FlowLogsClientListResponse]) + + // UpdateTags is the fake for method FlowLogsClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, networkWatcherName string, flowLogName string, parameters armnetwork.TagsObject, options *armnetwork.FlowLogsClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.FlowLogsClientUpdateTagsResponse], 
errResp azfake.ErrorResponder) +} + +// NewFlowLogsServerTransport creates a new instance of FlowLogsServerTransport with the provided implementation. +// The returned FlowLogsServerTransport instance is connected to an instance of armnetwork.FlowLogsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewFlowLogsServerTransport(srv *FlowLogsServer) *FlowLogsServerTransport { + return &FlowLogsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.FlowLogsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.FlowLogsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.FlowLogsClientListResponse]](), + } +} + +// FlowLogsServerTransport connects instances of armnetwork.FlowLogsClient to instances of FlowLogsServer. +// Don't use this type directly, use NewFlowLogsServerTransport instead. +type FlowLogsServerTransport struct { + srv *FlowLogsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.FlowLogsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.FlowLogsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.FlowLogsClientListResponse]] +} + +// Do implements the policy.Transporter interface for FlowLogsServerTransport. +func (f *FlowLogsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "FlowLogsClient.BeginCreateOrUpdate": + resp, err = f.dispatchBeginCreateOrUpdate(req) + case "FlowLogsClient.BeginDelete": + resp, err = f.dispatchBeginDelete(req) + case "FlowLogsClient.Get": + resp, err = f.dispatchGet(req) + case "FlowLogsClient.NewListPager": + resp, err = f.dispatchNewListPager(req) + case "FlowLogsClient.UpdateTags": + resp, err = f.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (f *FlowLogsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if f.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := f.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/flowLogs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.FlowLog](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + flowLogNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("flowLogName")]) + if err 
!= nil { + return nil, err + } + respr, errRespr := f.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, networkWatcherNameParam, flowLogNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + f.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + f.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + f.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (f *FlowLogsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if f.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := f.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/flowLogs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + flowLogNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("flowLogName")]) + if err != nil { + return nil, err + } + respr, errRespr := f.srv.BeginDelete(req.Context(), resourceGroupNameParam, networkWatcherNameParam, flowLogNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + f.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + f.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + f.beginDelete.remove(req) + } + + return resp, nil +} + +func (f *FlowLogsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if f.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/flowLogs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + flowLogNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("flowLogName")]) + if err != nil { + return nil, err + } + respr, errRespr := f.srv.Get(req.Context(), resourceGroupNameParam, networkWatcherNameParam, flowLogNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).FlowLog, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (f *FlowLogsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if f.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := f.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/flowLogs` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + resp := f.srv.NewListPager(resourceGroupNameParam, networkWatcherNameParam, nil) + newListPager = &resp + f.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.FlowLogsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + f.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + f.newListPager.remove(req) + } + return resp, nil +} + +func (f *FlowLogsServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if f.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/flowLogs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + flowLogNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("flowLogName")]) + if err != nil { + return nil, err + } + respr, errRespr := f.srv.UpdateTags(req.Context(), resourceGroupNameParam, networkWatcherNameParam, flowLogNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).FlowLog, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/groups_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/groups_server.go new file mode 100644 index 00000000000..d1c2ad26213 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/groups_server.go @@ -0,0 +1,312 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" + "strconv" +) + +// GroupsServer is a fake server for instances of the armnetwork.GroupsClient type. 
+type GroupsServer struct { + // CreateOrUpdate is the fake for method GroupsClient.CreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + CreateOrUpdate func(ctx context.Context, resourceGroupName string, networkManagerName string, networkGroupName string, parameters armnetwork.Group, options *armnetwork.GroupsClientCreateOrUpdateOptions) (resp azfake.Responder[armnetwork.GroupsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method GroupsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, networkManagerName string, networkGroupName string, options *armnetwork.GroupsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.GroupsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method GroupsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, networkManagerName string, networkGroupName string, options *armnetwork.GroupsClientGetOptions) (resp azfake.Responder[armnetwork.GroupsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method GroupsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, networkManagerName string, options *armnetwork.GroupsClientListOptions) (resp azfake.PagerResponder[armnetwork.GroupsClientListResponse]) +} + +// NewGroupsServerTransport creates a new instance of GroupsServerTransport with the provided implementation. +// The returned GroupsServerTransport instance is connected to an instance of armnetwork.GroupsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewGroupsServerTransport(srv *GroupsServer) *GroupsServerTransport { + return &GroupsServerTransport{ + srv: srv, + beginDelete: newTracker[azfake.PollerResponder[armnetwork.GroupsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.GroupsClientListResponse]](), + } +} + +// GroupsServerTransport connects instances of armnetwork.GroupsClient to instances of GroupsServer. +// Don't use this type directly, use NewGroupsServerTransport instead. +type GroupsServerTransport struct { + srv *GroupsServer + beginDelete *tracker[azfake.PollerResponder[armnetwork.GroupsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.GroupsClientListResponse]] +} + +// Do implements the policy.Transporter interface for GroupsServerTransport. 
+func (g *GroupsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "GroupsClient.CreateOrUpdate": + resp, err = g.dispatchCreateOrUpdate(req) + case "GroupsClient.BeginDelete": + resp, err = g.dispatchBeginDelete(req) + case "GroupsClient.Get": + resp, err = g.dispatchGet(req) + case "GroupsClient.NewListPager": + resp, err = g.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (g *GroupsServerTransport) dispatchCreateOrUpdate(req *http.Request) (*http.Response, error) { + if g.srv.CreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method CreateOrUpdate not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.Group](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + networkGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkGroupName")]) + if err != nil { + return nil, err + } + ifMatchParam := getOptional(getHeaderValue(req.Header, "If-Match")) + var options *armnetwork.GroupsClientCreateOrUpdateOptions + if ifMatchParam != nil { + options = &armnetwork.GroupsClientCreateOrUpdateOptions{ + IfMatch: ifMatchParam, + } + } + respr, errRespr := g.srv.CreateOrUpdate(req.Context(), resourceGroupNameParam, networkManagerNameParam, networkGroupNameParam, body, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusCreated}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Group, req) + if err != nil { + return nil, err + } + if val := server.GetResponse(respr).ETag; val != nil { + resp.Header.Set("ETag", *val) + } + return resp, nil +} + +func (g *GroupsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if g.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := g.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + networkGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkGroupName")]) + if err != nil { + return nil, err + } + forceUnescaped, err := url.QueryUnescape(qp.Get("force")) + if err != nil { + return nil, err + } + forceParam, err := parseOptional(forceUnescaped, strconv.ParseBool) + if err != nil { + return nil, err + } + var options *armnetwork.GroupsClientBeginDeleteOptions + if forceParam != nil { + options = &armnetwork.GroupsClientBeginDeleteOptions{ + Force: forceParam, + } + } + respr, errRespr := g.srv.BeginDelete(req.Context(), resourceGroupNameParam, networkManagerNameParam, networkGroupNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + g.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + g.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + g.beginDelete.remove(req) + } + + return resp, nil +} + +func (g *GroupsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if g.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + networkGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := g.srv.Get(req.Context(), resourceGroupNameParam, networkManagerNameParam, networkGroupNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Group, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (g *GroupsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if g.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := g.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkGroups` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + topUnescaped, err := url.QueryUnescape(qp.Get("$top")) + if err != nil { + return nil, err + } + topParam, err := parseOptional(topUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + skipTokenUnescaped, err := url.QueryUnescape(qp.Get("$skipToken")) + if err != nil { + return nil, err + } + skipTokenParam := getOptional(skipTokenUnescaped) + var options *armnetwork.GroupsClientListOptions + if topParam != nil || skipTokenParam != nil { + options = 
&armnetwork.GroupsClientListOptions{ + Top: topParam, + SkipToken: skipTokenParam, + } + } + resp := g.srv.NewListPager(resourceGroupNameParam, networkManagerNameParam, options) + newListPager = &resp + g.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.GroupsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + g.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + g.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/hubroutetables_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/hubroutetables_server.go new file mode 100644 index 00000000000..a088362a4b9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/hubroutetables_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// HubRouteTablesServer is a fake server for instances of the armnetwork.HubRouteTablesClient type. 
+type HubRouteTablesServer struct { + // BeginCreateOrUpdate is the fake for method HubRouteTablesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, virtualHubName string, routeTableName string, routeTableParameters armnetwork.HubRouteTable, options *armnetwork.HubRouteTablesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.HubRouteTablesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method HubRouteTablesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, virtualHubName string, routeTableName string, options *armnetwork.HubRouteTablesClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.HubRouteTablesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method HubRouteTablesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, virtualHubName string, routeTableName string, options *armnetwork.HubRouteTablesClientGetOptions) (resp azfake.Responder[armnetwork.HubRouteTablesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method HubRouteTablesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, virtualHubName string, options *armnetwork.HubRouteTablesClientListOptions) (resp azfake.PagerResponder[armnetwork.HubRouteTablesClientListResponse]) +} + +// NewHubRouteTablesServerTransport creates a new instance of HubRouteTablesServerTransport with the provided implementation. +// The returned HubRouteTablesServerTransport instance is connected to an instance of armnetwork.HubRouteTablesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewHubRouteTablesServerTransport(srv *HubRouteTablesServer) *HubRouteTablesServerTransport { + return &HubRouteTablesServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.HubRouteTablesClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.HubRouteTablesClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.HubRouteTablesClientListResponse]](), + } +} + +// HubRouteTablesServerTransport connects instances of armnetwork.HubRouteTablesClient to instances of HubRouteTablesServer. +// Don't use this type directly, use NewHubRouteTablesServerTransport instead. +type HubRouteTablesServerTransport struct { + srv *HubRouteTablesServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.HubRouteTablesClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.HubRouteTablesClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.HubRouteTablesClientListResponse]] +} + +// Do implements the policy.Transporter interface for HubRouteTablesServerTransport. 
+func (h *HubRouteTablesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "HubRouteTablesClient.BeginCreateOrUpdate": + resp, err = h.dispatchBeginCreateOrUpdate(req) + case "HubRouteTablesClient.BeginDelete": + resp, err = h.dispatchBeginDelete(req) + case "HubRouteTablesClient.Get": + resp, err = h.dispatchGet(req) + case "HubRouteTablesClient.NewListPager": + resp, err = h.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (h *HubRouteTablesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if h.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := h.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/hubRouteTables/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.HubRouteTable](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + routeTableNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeTableName")]) + if err != nil { + return nil, err + } + respr, errRespr := h.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, virtualHubNameParam, routeTableNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + h.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + h.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + h.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (h *HubRouteTablesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if h.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := h.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/hubRouteTables/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + routeTableNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeTableName")]) + if err != nil { + return nil, err + } + respr, errRespr := h.srv.BeginDelete(req.Context(), resourceGroupNameParam, virtualHubNameParam, routeTableNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + h.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + h.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + h.beginDelete.remove(req) + } + + return resp, nil +} + +func (h *HubRouteTablesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if h.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/hubRouteTables/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + routeTableNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeTableName")]) + if err != nil { + return nil, err + } + respr, errRespr := h.srv.Get(req.Context(), resourceGroupNameParam, virtualHubNameParam, routeTableNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).HubRouteTable, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (h *HubRouteTablesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if h.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := h.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/hubRouteTables` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + resp := h.srv.NewListPager(resourceGroupNameParam, virtualHubNameParam, nil) + newListPager = &resp + h.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.HubRouteTablesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + h.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + h.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/hubvirtualnetworkconnections_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/hubvirtualnetworkconnections_server.go new file mode 100644 index 00000000000..6a825306ee8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/hubvirtualnetworkconnections_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// HubVirtualNetworkConnectionsServer is a fake server for instances of the armnetwork.HubVirtualNetworkConnectionsClient type. +type HubVirtualNetworkConnectionsServer struct { + // BeginCreateOrUpdate is the fake for method HubVirtualNetworkConnectionsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, virtualHubName string, connectionName string, hubVirtualNetworkConnectionParameters armnetwork.HubVirtualNetworkConnection, options *armnetwork.HubVirtualNetworkConnectionsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.HubVirtualNetworkConnectionsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method HubVirtualNetworkConnectionsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, virtualHubName string, connectionName string, options *armnetwork.HubVirtualNetworkConnectionsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.HubVirtualNetworkConnectionsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method HubVirtualNetworkConnectionsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, virtualHubName string, connectionName string, options *armnetwork.HubVirtualNetworkConnectionsClientGetOptions) (resp azfake.Responder[armnetwork.HubVirtualNetworkConnectionsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method HubVirtualNetworkConnectionsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, virtualHubName string, options *armnetwork.HubVirtualNetworkConnectionsClientListOptions) (resp azfake.PagerResponder[armnetwork.HubVirtualNetworkConnectionsClientListResponse]) +} + +// NewHubVirtualNetworkConnectionsServerTransport creates a new instance of 
HubVirtualNetworkConnectionsServerTransport with the provided implementation. +// The returned HubVirtualNetworkConnectionsServerTransport instance is connected to an instance of armnetwork.HubVirtualNetworkConnectionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewHubVirtualNetworkConnectionsServerTransport(srv *HubVirtualNetworkConnectionsServer) *HubVirtualNetworkConnectionsServerTransport { + return &HubVirtualNetworkConnectionsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.HubVirtualNetworkConnectionsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.HubVirtualNetworkConnectionsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.HubVirtualNetworkConnectionsClientListResponse]](), + } +} + +// HubVirtualNetworkConnectionsServerTransport connects instances of armnetwork.HubVirtualNetworkConnectionsClient to instances of HubVirtualNetworkConnectionsServer. +// Don't use this type directly, use NewHubVirtualNetworkConnectionsServerTransport instead. +type HubVirtualNetworkConnectionsServerTransport struct { + srv *HubVirtualNetworkConnectionsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.HubVirtualNetworkConnectionsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.HubVirtualNetworkConnectionsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.HubVirtualNetworkConnectionsClientListResponse]] +} + +// Do implements the policy.Transporter interface for HubVirtualNetworkConnectionsServerTransport. +func (h *HubVirtualNetworkConnectionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "HubVirtualNetworkConnectionsClient.BeginCreateOrUpdate": + resp, err = h.dispatchBeginCreateOrUpdate(req) + case "HubVirtualNetworkConnectionsClient.BeginDelete": + resp, err = h.dispatchBeginDelete(req) + case "HubVirtualNetworkConnectionsClient.Get": + resp, err = h.dispatchGet(req) + case "HubVirtualNetworkConnectionsClient.NewListPager": + resp, err = h.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (h *HubVirtualNetworkConnectionsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if h.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := h.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/hubVirtualNetworkConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.HubVirtualNetworkConnection](req) + if err != nil { + return 
nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := h.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, virtualHubNameParam, connectionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + h.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + h.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + h.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (h *HubVirtualNetworkConnectionsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if h.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := h.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/hubVirtualNetworkConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := h.srv.BeginDelete(req.Context(), resourceGroupNameParam, virtualHubNameParam, connectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + h.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + h.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + h.beginDelete.remove(req) + } + + return resp, nil +} + +func (h *HubVirtualNetworkConnectionsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if h.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/hubVirtualNetworkConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := h.srv.Get(req.Context(), resourceGroupNameParam, virtualHubNameParam, connectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).HubVirtualNetworkConnection, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (h *HubVirtualNetworkConnectionsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if h.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := h.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/hubVirtualNetworkConnections` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + resp := h.srv.NewListPager(resourceGroupNameParam, virtualHubNameParam, nil) + newListPager = &resp + h.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.HubVirtualNetworkConnectionsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + h.newListPager.remove(req) + return nil, 
&nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + h.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/inboundnatrules_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/inboundnatrules_server.go new file mode 100644 index 00000000000..dc862193227 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/inboundnatrules_server.go @@ -0,0 +1,284 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// InboundNatRulesServer is a fake server for instances of the armnetwork.InboundNatRulesClient type. +type InboundNatRulesServer struct { + // BeginCreateOrUpdate is the fake for method InboundNatRulesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, loadBalancerName string, inboundNatRuleName string, inboundNatRuleParameters armnetwork.InboundNatRule, options *armnetwork.InboundNatRulesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.InboundNatRulesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method InboundNatRulesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, loadBalancerName string, inboundNatRuleName string, options *armnetwork.InboundNatRulesClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.InboundNatRulesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method InboundNatRulesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, loadBalancerName string, inboundNatRuleName string, options *armnetwork.InboundNatRulesClientGetOptions) (resp azfake.Responder[armnetwork.InboundNatRulesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method InboundNatRulesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, loadBalancerName string, options *armnetwork.InboundNatRulesClientListOptions) (resp azfake.PagerResponder[armnetwork.InboundNatRulesClientListResponse]) +} + +// NewInboundNatRulesServerTransport creates a new instance of InboundNatRulesServerTransport with the provided implementation. 
+// The returned InboundNatRulesServerTransport instance is connected to an instance of armnetwork.InboundNatRulesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewInboundNatRulesServerTransport(srv *InboundNatRulesServer) *InboundNatRulesServerTransport { + return &InboundNatRulesServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.InboundNatRulesClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.InboundNatRulesClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.InboundNatRulesClientListResponse]](), + } +} + +// InboundNatRulesServerTransport connects instances of armnetwork.InboundNatRulesClient to instances of InboundNatRulesServer. +// Don't use this type directly, use NewInboundNatRulesServerTransport instead. +type InboundNatRulesServerTransport struct { + srv *InboundNatRulesServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.InboundNatRulesClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.InboundNatRulesClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.InboundNatRulesClientListResponse]] +} + +// Do implements the policy.Transporter interface for InboundNatRulesServerTransport. +func (i *InboundNatRulesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "InboundNatRulesClient.BeginCreateOrUpdate": + resp, err = i.dispatchBeginCreateOrUpdate(req) + case "InboundNatRulesClient.BeginDelete": + resp, err = i.dispatchBeginDelete(req) + case "InboundNatRulesClient.Get": + resp, err = i.dispatchGet(req) + case "InboundNatRulesClient.NewListPager": + resp, err = i.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (i *InboundNatRulesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if i.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := i.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/inboundNatRules/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.InboundNatRule](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + loadBalancerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("loadBalancerName")]) + if err != nil { + return nil, err + } + inboundNatRuleNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("inboundNatRuleName")]) + if err != nil { + return nil, err + } 
+ respr, errRespr := i.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, loadBalancerNameParam, inboundNatRuleNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + i.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + i.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + i.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (i *InboundNatRulesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if i.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := i.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/inboundNatRules/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + loadBalancerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("loadBalancerName")]) + if err != nil { + return nil, err + } + inboundNatRuleNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("inboundNatRuleName")]) + if err != nil { + return nil, err + } + respr, errRespr := i.srv.BeginDelete(req.Context(), resourceGroupNameParam, loadBalancerNameParam, inboundNatRuleNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + i.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + i.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)}
+	}
+	if !server.PollerResponderMore(beginDelete) {
+		i.beginDelete.remove(req)
+	}
+
+	return resp, nil
+}
+
+func (i *InboundNatRulesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) {
+	if i.srv.Get == nil {
+		return nil, &nonRetriableError{errors.New("fake for method Get not implemented")}
+	}
+	const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers/(?P<loadBalancerName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/inboundNatRules/(?P<inboundNatRuleName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)`
+	regex := regexp.MustCompile(regexStr)
+	matches := regex.FindStringSubmatch(req.URL.EscapedPath())
+	if matches == nil || len(matches) < 4 {
+		return nil, fmt.Errorf("failed to parse path %s", req.URL.Path)
+	}
+	qp := req.URL.Query()
+	resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")])
+	if err != nil {
+		return nil, err
+	}
+	loadBalancerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("loadBalancerName")])
+	if err != nil {
+		return nil, err
+	}
+	inboundNatRuleNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("inboundNatRuleName")])
+	if err != nil {
+		return nil, err
+	}
+	expandUnescaped, err := url.QueryUnescape(qp.Get("$expand"))
+	if err != nil {
+		return nil, err
+	}
+	expandParam := getOptional(expandUnescaped)
+	var options *armnetwork.InboundNatRulesClientGetOptions
+	if expandParam != nil {
+		options = &armnetwork.InboundNatRulesClientGetOptions{
+			Expand: expandParam,
+		}
+	}
+	respr, errRespr := i.srv.Get(req.Context(), resourceGroupNameParam, loadBalancerNameParam, inboundNatRuleNameParam, options)
+	if respErr := server.GetError(errRespr, req); respErr != nil {
+		return nil, respErr
+	}
+	respContent := server.GetResponseContent(respr)
+	if !contains([]int{http.StatusOK}, respContent.HTTPStatus) {
+		return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).InboundNatRule, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (i *InboundNatRulesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if i.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := i.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/inboundNatRules` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + loadBalancerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("loadBalancerName")]) + if err != nil { + return nil, err + } + resp := i.srv.NewListPager(resourceGroupNameParam, loadBalancerNameParam, nil) + newListPager = &resp + i.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.InboundNatRulesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + i.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + i.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/inboundsecurityrule_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/inboundsecurityrule_server.go new file mode 100644 index 00000000000..6900531fca9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/inboundsecurityrule_server.go @@ -0,0 +1,123 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// InboundSecurityRuleServer is a fake server for instances of the armnetwork.InboundSecurityRuleClient type. 
+type InboundSecurityRuleServer struct { + // BeginCreateOrUpdate is the fake for method InboundSecurityRuleClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, ruleCollectionName string, parameters armnetwork.InboundSecurityRule, options *armnetwork.InboundSecurityRuleClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.InboundSecurityRuleClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewInboundSecurityRuleServerTransport creates a new instance of InboundSecurityRuleServerTransport with the provided implementation. +// The returned InboundSecurityRuleServerTransport instance is connected to an instance of armnetwork.InboundSecurityRuleClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewInboundSecurityRuleServerTransport(srv *InboundSecurityRuleServer) *InboundSecurityRuleServerTransport { + return &InboundSecurityRuleServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.InboundSecurityRuleClientCreateOrUpdateResponse]](), + } +} + +// InboundSecurityRuleServerTransport connects instances of armnetwork.InboundSecurityRuleClient to instances of InboundSecurityRuleServer. +// Don't use this type directly, use NewInboundSecurityRuleServerTransport instead. +type InboundSecurityRuleServerTransport struct { + srv *InboundSecurityRuleServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.InboundSecurityRuleClientCreateOrUpdateResponse]] +} + +// Do implements the policy.Transporter interface for InboundSecurityRuleServerTransport. +func (i *InboundSecurityRuleServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "InboundSecurityRuleClient.BeginCreateOrUpdate": + resp, err = i.dispatchBeginCreateOrUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (i *InboundSecurityRuleServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if i.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := i.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkVirtualAppliances/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/inboundSecurityRules/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.InboundSecurityRule](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkVirtualApplianceNameParam, err := 
url.PathUnescape(matches[regex.SubexpIndex("networkVirtualApplianceName")]) + if err != nil { + return nil, err + } + ruleCollectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ruleCollectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := i.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, networkVirtualApplianceNameParam, ruleCollectionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + i.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + i.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + i.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/interfaceipconfigurations_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/interfaceipconfigurations_server.go new file mode 100644 index 00000000000..6190556b4eb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/interfaceipconfigurations_server.go @@ -0,0 +1,156 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// InterfaceIPConfigurationsServer is a fake server for instances of the armnetwork.InterfaceIPConfigurationsClient type. +type InterfaceIPConfigurationsServer struct { + // Get is the fake for method InterfaceIPConfigurationsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, networkInterfaceName string, ipConfigurationName string, options *armnetwork.InterfaceIPConfigurationsClientGetOptions) (resp azfake.Responder[armnetwork.InterfaceIPConfigurationsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method InterfaceIPConfigurationsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, networkInterfaceName string, options *armnetwork.InterfaceIPConfigurationsClientListOptions) (resp azfake.PagerResponder[armnetwork.InterfaceIPConfigurationsClientListResponse]) +} + +// NewInterfaceIPConfigurationsServerTransport creates a new instance of InterfaceIPConfigurationsServerTransport with the provided implementation. 
+// The returned InterfaceIPConfigurationsServerTransport instance is connected to an instance of armnetwork.InterfaceIPConfigurationsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewInterfaceIPConfigurationsServerTransport(srv *InterfaceIPConfigurationsServer) *InterfaceIPConfigurationsServerTransport { + return &InterfaceIPConfigurationsServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.InterfaceIPConfigurationsClientListResponse]](), + } +} + +// InterfaceIPConfigurationsServerTransport connects instances of armnetwork.InterfaceIPConfigurationsClient to instances of InterfaceIPConfigurationsServer. +// Don't use this type directly, use NewInterfaceIPConfigurationsServerTransport instead. +type InterfaceIPConfigurationsServerTransport struct { + srv *InterfaceIPConfigurationsServer + newListPager *tracker[azfake.PagerResponder[armnetwork.InterfaceIPConfigurationsClientListResponse]] +} + +// Do implements the policy.Transporter interface for InterfaceIPConfigurationsServerTransport. +func (i *InterfaceIPConfigurationsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "InterfaceIPConfigurationsClient.Get": + resp, err = i.dispatchGet(req) + case "InterfaceIPConfigurationsClient.NewListPager": + resp, err = i.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (i *InterfaceIPConfigurationsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if i.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkInterfaces/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ipConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkInterfaceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkInterfaceName")]) + if err != nil { + return nil, err + } + ipConfigurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ipConfigurationName")]) + if err != nil { + return nil, err + } + respr, errRespr := i.srv.Get(req.Context(), resourceGroupNameParam, networkInterfaceNameParam, ipConfigurationNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).InterfaceIPConfiguration, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (i *InterfaceIPConfigurationsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if i.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := i.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkInterfaces/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ipConfigurations` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkInterfaceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkInterfaceName")]) + if err != nil { + return nil, err + } + resp := i.srv.NewListPager(resourceGroupNameParam, networkInterfaceNameParam, nil) + newListPager = &resp + i.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.InterfaceIPConfigurationsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + i.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + i.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/interfaceloadbalancers_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/interfaceloadbalancers_server.go new file mode 100644 index 00000000000..b43b668e429 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/interfaceloadbalancers_server.go @@ -0,0 +1,112 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// InterfaceLoadBalancersServer is a fake server for instances of the armnetwork.InterfaceLoadBalancersClient type. 
+type InterfaceLoadBalancersServer struct { + // NewListPager is the fake for method InterfaceLoadBalancersClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, networkInterfaceName string, options *armnetwork.InterfaceLoadBalancersClientListOptions) (resp azfake.PagerResponder[armnetwork.InterfaceLoadBalancersClientListResponse]) +} + +// NewInterfaceLoadBalancersServerTransport creates a new instance of InterfaceLoadBalancersServerTransport with the provided implementation. +// The returned InterfaceLoadBalancersServerTransport instance is connected to an instance of armnetwork.InterfaceLoadBalancersClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewInterfaceLoadBalancersServerTransport(srv *InterfaceLoadBalancersServer) *InterfaceLoadBalancersServerTransport { + return &InterfaceLoadBalancersServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.InterfaceLoadBalancersClientListResponse]](), + } +} + +// InterfaceLoadBalancersServerTransport connects instances of armnetwork.InterfaceLoadBalancersClient to instances of InterfaceLoadBalancersServer. +// Don't use this type directly, use NewInterfaceLoadBalancersServerTransport instead. +type InterfaceLoadBalancersServerTransport struct { + srv *InterfaceLoadBalancersServer + newListPager *tracker[azfake.PagerResponder[armnetwork.InterfaceLoadBalancersClientListResponse]] +} + +// Do implements the policy.Transporter interface for InterfaceLoadBalancersServerTransport. +func (i *InterfaceLoadBalancersServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "InterfaceLoadBalancersClient.NewListPager": + resp, err = i.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (i *InterfaceLoadBalancersServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if i.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := i.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkInterfaces/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/loadBalancers` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkInterfaceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkInterfaceName")]) + if err != nil { + return nil, err + } + resp := i.srv.NewListPager(resourceGroupNameParam, networkInterfaceNameParam, nil) + newListPager = &resp + i.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.InterfaceLoadBalancersClientListResponse, createLink func() string) { + page.NextLink = 
to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + i.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + i.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/interfaces_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/interfaces_server.go new file mode 100644 index 00000000000..8d4a46be1e9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/interfaces_server.go @@ -0,0 +1,910 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// InterfacesServer is a fake server for instances of the armnetwork.InterfacesClient type. +type InterfacesServer struct { + // BeginCreateOrUpdate is the fake for method InterfacesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, networkInterfaceName string, parameters armnetwork.Interface, options *armnetwork.InterfacesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.InterfacesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method InterfacesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, networkInterfaceName string, options *armnetwork.InterfacesClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.InterfacesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method InterfacesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, networkInterfaceName string, options *armnetwork.InterfacesClientGetOptions) (resp azfake.Responder[armnetwork.InterfacesClientGetResponse], errResp azfake.ErrorResponder) + + // GetCloudServiceNetworkInterface is the fake for method InterfacesClient.GetCloudServiceNetworkInterface + // HTTP status codes to indicate success: http.StatusOK + GetCloudServiceNetworkInterface func(ctx context.Context, resourceGroupName string, cloudServiceName string, roleInstanceName string, networkInterfaceName string, options *armnetwork.InterfacesClientGetCloudServiceNetworkInterfaceOptions) (resp azfake.Responder[armnetwork.InterfacesClientGetCloudServiceNetworkInterfaceResponse], errResp azfake.ErrorResponder) 
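+
+	// Illustrative sketch (assumed test wiring, not part of this generated file;
+	// the usual azcore, azfake, arm and to imports are assumed): a unit test
+	// populates one or more of these fields and routes a real client through the
+	// transport constructed below, e.g.:
+	//
+	//	srv := fake.InterfacesServer{
+	//		Get: func(ctx context.Context, resourceGroupName string, networkInterfaceName string, options *armnetwork.InterfacesClientGetOptions) (resp azfake.Responder[armnetwork.InterfacesClientGetResponse], errResp azfake.ErrorResponder) {
+	//			resp.SetResponse(http.StatusOK, armnetwork.InterfacesClientGetResponse{Interface: armnetwork.Interface{Name: to.Ptr(networkInterfaceName)}}, nil)
+	//			return
+	//		},
+	//	}
+	//	client, err := armnetwork.NewInterfacesClient("subscriptionID", &azfake.TokenCredential{}, &arm.ClientOptions{
+	//		ClientOptions: azcore.ClientOptions{Transporter: fake.NewInterfacesServerTransport(&srv)},
+	//	})
+	//
+	// Fields left nil cause the transport to return a nonRetriableError for the
+	// corresponding client method.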
+ + // BeginGetEffectiveRouteTable is the fake for method InterfacesClient.BeginGetEffectiveRouteTable + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGetEffectiveRouteTable func(ctx context.Context, resourceGroupName string, networkInterfaceName string, options *armnetwork.InterfacesClientBeginGetEffectiveRouteTableOptions) (resp azfake.PollerResponder[armnetwork.InterfacesClientGetEffectiveRouteTableResponse], errResp azfake.ErrorResponder) + + // GetVirtualMachineScaleSetIPConfiguration is the fake for method InterfacesClient.GetVirtualMachineScaleSetIPConfiguration + // HTTP status codes to indicate success: http.StatusOK + GetVirtualMachineScaleSetIPConfiguration func(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, ipConfigurationName string, options *armnetwork.InterfacesClientGetVirtualMachineScaleSetIPConfigurationOptions) (resp azfake.Responder[armnetwork.InterfacesClientGetVirtualMachineScaleSetIPConfigurationResponse], errResp azfake.ErrorResponder) + + // GetVirtualMachineScaleSetNetworkInterface is the fake for method InterfacesClient.GetVirtualMachineScaleSetNetworkInterface + // HTTP status codes to indicate success: http.StatusOK + GetVirtualMachineScaleSetNetworkInterface func(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, options *armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceOptions) (resp azfake.Responder[armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method InterfacesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armnetwork.InterfacesClientListOptions) (resp azfake.PagerResponder[armnetwork.InterfacesClientListResponse]) + + // NewListAllPager is the fake for method InterfacesClient.NewListAllPager + // HTTP status codes to indicate success: http.StatusOK + NewListAllPager func(options *armnetwork.InterfacesClientListAllOptions) (resp azfake.PagerResponder[armnetwork.InterfacesClientListAllResponse]) + + // NewListCloudServiceNetworkInterfacesPager is the fake for method InterfacesClient.NewListCloudServiceNetworkInterfacesPager + // HTTP status codes to indicate success: http.StatusOK + NewListCloudServiceNetworkInterfacesPager func(resourceGroupName string, cloudServiceName string, options *armnetwork.InterfacesClientListCloudServiceNetworkInterfacesOptions) (resp azfake.PagerResponder[armnetwork.InterfacesClientListCloudServiceNetworkInterfacesResponse]) + + // NewListCloudServiceRoleInstanceNetworkInterfacesPager is the fake for method InterfacesClient.NewListCloudServiceRoleInstanceNetworkInterfacesPager + // HTTP status codes to indicate success: http.StatusOK + NewListCloudServiceRoleInstanceNetworkInterfacesPager func(resourceGroupName string, cloudServiceName string, roleInstanceName string, options *armnetwork.InterfacesClientListCloudServiceRoleInstanceNetworkInterfacesOptions) (resp azfake.PagerResponder[armnetwork.InterfacesClientListCloudServiceRoleInstanceNetworkInterfacesResponse]) + + // BeginListEffectiveNetworkSecurityGroups is the fake for method InterfacesClient.BeginListEffectiveNetworkSecurityGroups + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginListEffectiveNetworkSecurityGroups func(ctx 
context.Context, resourceGroupName string, networkInterfaceName string, options *armnetwork.InterfacesClientBeginListEffectiveNetworkSecurityGroupsOptions) (resp azfake.PollerResponder[armnetwork.InterfacesClientListEffectiveNetworkSecurityGroupsResponse], errResp azfake.ErrorResponder) + + // NewListVirtualMachineScaleSetIPConfigurationsPager is the fake for method InterfacesClient.NewListVirtualMachineScaleSetIPConfigurationsPager + // HTTP status codes to indicate success: http.StatusOK + NewListVirtualMachineScaleSetIPConfigurationsPager func(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, options *armnetwork.InterfacesClientListVirtualMachineScaleSetIPConfigurationsOptions) (resp azfake.PagerResponder[armnetwork.InterfacesClientListVirtualMachineScaleSetIPConfigurationsResponse]) + + // NewListVirtualMachineScaleSetNetworkInterfacesPager is the fake for method InterfacesClient.NewListVirtualMachineScaleSetNetworkInterfacesPager + // HTTP status codes to indicate success: http.StatusOK + NewListVirtualMachineScaleSetNetworkInterfacesPager func(resourceGroupName string, virtualMachineScaleSetName string, options *armnetwork.InterfacesClientListVirtualMachineScaleSetNetworkInterfacesOptions) (resp azfake.PagerResponder[armnetwork.InterfacesClientListVirtualMachineScaleSetNetworkInterfacesResponse]) + + // NewListVirtualMachineScaleSetVMNetworkInterfacesPager is the fake for method InterfacesClient.NewListVirtualMachineScaleSetVMNetworkInterfacesPager + // HTTP status codes to indicate success: http.StatusOK + NewListVirtualMachineScaleSetVMNetworkInterfacesPager func(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, options *armnetwork.InterfacesClientListVirtualMachineScaleSetVMNetworkInterfacesOptions) (resp azfake.PagerResponder[armnetwork.InterfacesClientListVirtualMachineScaleSetVMNetworkInterfacesResponse]) + + // UpdateTags is the fake for method InterfacesClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, networkInterfaceName string, parameters armnetwork.TagsObject, options *armnetwork.InterfacesClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.InterfacesClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewInterfacesServerTransport creates a new instance of InterfacesServerTransport with the provided implementation. +// The returned InterfacesServerTransport instance is connected to an instance of armnetwork.InterfacesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
+func NewInterfacesServerTransport(srv *InterfacesServer) *InterfacesServerTransport { + return &InterfacesServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.InterfacesClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.InterfacesClientDeleteResponse]](), + beginGetEffectiveRouteTable: newTracker[azfake.PollerResponder[armnetwork.InterfacesClientGetEffectiveRouteTableResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.InterfacesClientListResponse]](), + newListAllPager: newTracker[azfake.PagerResponder[armnetwork.InterfacesClientListAllResponse]](), + newListCloudServiceNetworkInterfacesPager: newTracker[azfake.PagerResponder[armnetwork.InterfacesClientListCloudServiceNetworkInterfacesResponse]](), + newListCloudServiceRoleInstanceNetworkInterfacesPager: newTracker[azfake.PagerResponder[armnetwork.InterfacesClientListCloudServiceRoleInstanceNetworkInterfacesResponse]](), + beginListEffectiveNetworkSecurityGroups: newTracker[azfake.PollerResponder[armnetwork.InterfacesClientListEffectiveNetworkSecurityGroupsResponse]](), + newListVirtualMachineScaleSetIPConfigurationsPager: newTracker[azfake.PagerResponder[armnetwork.InterfacesClientListVirtualMachineScaleSetIPConfigurationsResponse]](), + newListVirtualMachineScaleSetNetworkInterfacesPager: newTracker[azfake.PagerResponder[armnetwork.InterfacesClientListVirtualMachineScaleSetNetworkInterfacesResponse]](), + newListVirtualMachineScaleSetVMNetworkInterfacesPager: newTracker[azfake.PagerResponder[armnetwork.InterfacesClientListVirtualMachineScaleSetVMNetworkInterfacesResponse]](), + } +} + +// InterfacesServerTransport connects instances of armnetwork.InterfacesClient to instances of InterfacesServer. +// Don't use this type directly, use NewInterfacesServerTransport instead. +type InterfacesServerTransport struct { + srv *InterfacesServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.InterfacesClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.InterfacesClientDeleteResponse]] + beginGetEffectiveRouteTable *tracker[azfake.PollerResponder[armnetwork.InterfacesClientGetEffectiveRouteTableResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.InterfacesClientListResponse]] + newListAllPager *tracker[azfake.PagerResponder[armnetwork.InterfacesClientListAllResponse]] + newListCloudServiceNetworkInterfacesPager *tracker[azfake.PagerResponder[armnetwork.InterfacesClientListCloudServiceNetworkInterfacesResponse]] + newListCloudServiceRoleInstanceNetworkInterfacesPager *tracker[azfake.PagerResponder[armnetwork.InterfacesClientListCloudServiceRoleInstanceNetworkInterfacesResponse]] + beginListEffectiveNetworkSecurityGroups *tracker[azfake.PollerResponder[armnetwork.InterfacesClientListEffectiveNetworkSecurityGroupsResponse]] + newListVirtualMachineScaleSetIPConfigurationsPager *tracker[azfake.PagerResponder[armnetwork.InterfacesClientListVirtualMachineScaleSetIPConfigurationsResponse]] + newListVirtualMachineScaleSetNetworkInterfacesPager *tracker[azfake.PagerResponder[armnetwork.InterfacesClientListVirtualMachineScaleSetNetworkInterfacesResponse]] + newListVirtualMachineScaleSetVMNetworkInterfacesPager *tracker[azfake.PagerResponder[armnetwork.InterfacesClientListVirtualMachineScaleSetVMNetworkInterfacesResponse]] +} + +// Do implements the policy.Transporter interface for InterfacesServerTransport. 
+func (i *InterfacesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "InterfacesClient.BeginCreateOrUpdate": + resp, err = i.dispatchBeginCreateOrUpdate(req) + case "InterfacesClient.BeginDelete": + resp, err = i.dispatchBeginDelete(req) + case "InterfacesClient.Get": + resp, err = i.dispatchGet(req) + case "InterfacesClient.GetCloudServiceNetworkInterface": + resp, err = i.dispatchGetCloudServiceNetworkInterface(req) + case "InterfacesClient.BeginGetEffectiveRouteTable": + resp, err = i.dispatchBeginGetEffectiveRouteTable(req) + case "InterfacesClient.GetVirtualMachineScaleSetIPConfiguration": + resp, err = i.dispatchGetVirtualMachineScaleSetIPConfiguration(req) + case "InterfacesClient.GetVirtualMachineScaleSetNetworkInterface": + resp, err = i.dispatchGetVirtualMachineScaleSetNetworkInterface(req) + case "InterfacesClient.NewListPager": + resp, err = i.dispatchNewListPager(req) + case "InterfacesClient.NewListAllPager": + resp, err = i.dispatchNewListAllPager(req) + case "InterfacesClient.NewListCloudServiceNetworkInterfacesPager": + resp, err = i.dispatchNewListCloudServiceNetworkInterfacesPager(req) + case "InterfacesClient.NewListCloudServiceRoleInstanceNetworkInterfacesPager": + resp, err = i.dispatchNewListCloudServiceRoleInstanceNetworkInterfacesPager(req) + case "InterfacesClient.BeginListEffectiveNetworkSecurityGroups": + resp, err = i.dispatchBeginListEffectiveNetworkSecurityGroups(req) + case "InterfacesClient.NewListVirtualMachineScaleSetIPConfigurationsPager": + resp, err = i.dispatchNewListVirtualMachineScaleSetIPConfigurationsPager(req) + case "InterfacesClient.NewListVirtualMachineScaleSetNetworkInterfacesPager": + resp, err = i.dispatchNewListVirtualMachineScaleSetNetworkInterfacesPager(req) + case "InterfacesClient.NewListVirtualMachineScaleSetVMNetworkInterfacesPager": + resp, err = i.dispatchNewListVirtualMachineScaleSetVMNetworkInterfacesPager(req) + case "InterfacesClient.UpdateTags": + resp, err = i.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (i *InterfacesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if i.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := i.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkInterfaces/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.Interface](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkInterfaceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkInterfaceName")]) + if err != nil { + 
return nil, err + } + respr, errRespr := i.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, networkInterfaceNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + i.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + i.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + i.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (i *InterfacesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if i.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := i.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkInterfaces/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkInterfaceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkInterfaceName")]) + if err != nil { + return nil, err + } + respr, errRespr := i.srv.BeginDelete(req.Context(), resourceGroupNameParam, networkInterfaceNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + i.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + i.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)}
+	}
+	if !server.PollerResponderMore(beginDelete) {
+		i.beginDelete.remove(req)
+	}
+
+	return resp, nil
+}
+
+func (i *InterfacesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) {
+	if i.srv.Get == nil {
+		return nil, &nonRetriableError{errors.New("fake for method Get not implemented")}
+	}
+	const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkInterfaces/(?P<networkInterfaceName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)`
+	regex := regexp.MustCompile(regexStr)
+	matches := regex.FindStringSubmatch(req.URL.EscapedPath())
+	if matches == nil || len(matches) < 3 {
+		return nil, fmt.Errorf("failed to parse path %s", req.URL.Path)
+	}
+	qp := req.URL.Query()
+	resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")])
+	if err != nil {
+		return nil, err
+	}
+	networkInterfaceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkInterfaceName")])
+	if err != nil {
+		return nil, err
+	}
+	expandUnescaped, err := url.QueryUnescape(qp.Get("$expand"))
+	if err != nil {
+		return nil, err
+	}
+	expandParam := getOptional(expandUnescaped)
+	var options *armnetwork.InterfacesClientGetOptions
+	if expandParam != nil {
+		options = &armnetwork.InterfacesClientGetOptions{
+			Expand: expandParam,
+		}
+	}
+	respr, errRespr := i.srv.Get(req.Context(), resourceGroupNameParam, networkInterfaceNameParam, options)
+	if respErr := server.GetError(errRespr, req); respErr != nil {
+		return nil, respErr
+	}
+	respContent := server.GetResponseContent(respr)
+	if !contains([]int{http.StatusOK}, respContent.HTTPStatus) {
+		return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Interface, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (i *InterfacesServerTransport) dispatchGetCloudServiceNetworkInterface(req *http.Request) (*http.Response, error) { + if i.srv.GetCloudServiceNetworkInterface == nil { + return nil, &nonRetriableError{errors.New("fake for method GetCloudServiceNetworkInterface not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/roleInstances/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkInterfaces/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + roleInstanceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("roleInstanceName")]) + if err != nil { + return nil, err + } + networkInterfaceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkInterfaceName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.InterfacesClientGetCloudServiceNetworkInterfaceOptions + if expandParam != nil { + options = &armnetwork.InterfacesClientGetCloudServiceNetworkInterfaceOptions{ + Expand: expandParam, + } + } + respr, errRespr := i.srv.GetCloudServiceNetworkInterface(req.Context(), resourceGroupNameParam, cloudServiceNameParam, roleInstanceNameParam, networkInterfaceNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Interface, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (i *InterfacesServerTransport) dispatchBeginGetEffectiveRouteTable(req *http.Request) (*http.Response, error) { + if i.srv.BeginGetEffectiveRouteTable == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGetEffectiveRouteTable not implemented")} + } + beginGetEffectiveRouteTable := i.beginGetEffectiveRouteTable.get(req) + if beginGetEffectiveRouteTable == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkInterfaces/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/effectiveRouteTable` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkInterfaceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkInterfaceName")]) + if err != nil { + return nil, err + } + respr, errRespr := i.srv.BeginGetEffectiveRouteTable(req.Context(), resourceGroupNameParam, networkInterfaceNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGetEffectiveRouteTable = &respr + i.beginGetEffectiveRouteTable.add(req, beginGetEffectiveRouteTable) + } + + resp, err := server.PollerResponderNext(beginGetEffectiveRouteTable, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + i.beginGetEffectiveRouteTable.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGetEffectiveRouteTable) { + i.beginGetEffectiveRouteTable.remove(req) + } + + return resp, nil +} + +func (i *InterfacesServerTransport) dispatchGetVirtualMachineScaleSetIPConfiguration(req *http.Request) (*http.Response, error) { + if i.srv.GetVirtualMachineScaleSetIPConfiguration == nil { + return nil, &nonRetriableError{errors.New("fake for method GetVirtualMachineScaleSetIPConfiguration not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkInterfaces/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ipConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 6 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualMachineScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualMachineScaleSetName")]) + if err != nil { + return nil, err + } + virtualmachineIndexParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualmachineIndex")]) + if err != nil { + return nil, err + } + networkInterfaceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkInterfaceName")]) + if err != nil { + return nil, err + } + ipConfigurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ipConfigurationName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.InterfacesClientGetVirtualMachineScaleSetIPConfigurationOptions + if expandParam != nil { + options = &armnetwork.InterfacesClientGetVirtualMachineScaleSetIPConfigurationOptions{ + Expand: expandParam, + } + } + respr, errRespr := i.srv.GetVirtualMachineScaleSetIPConfiguration(req.Context(), resourceGroupNameParam, virtualMachineScaleSetNameParam, virtualmachineIndexParam, networkInterfaceNameParam, ipConfigurationNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).InterfaceIPConfiguration, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (i *InterfacesServerTransport) dispatchGetVirtualMachineScaleSetNetworkInterface(req *http.Request) (*http.Response, error) { + if i.srv.GetVirtualMachineScaleSetNetworkInterface == nil { + return nil, &nonRetriableError{errors.New("fake for method GetVirtualMachineScaleSetNetworkInterface not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkInterfaces/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualMachineScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualMachineScaleSetName")]) + if err != nil { + return nil, err + } + virtualmachineIndexParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualmachineIndex")]) + if err != nil { + return nil, err + } + networkInterfaceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkInterfaceName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceOptions + if expandParam != nil { + options = &armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceOptions{ + Expand: expandParam, + } + } + respr, errRespr := i.srv.GetVirtualMachineScaleSetNetworkInterface(req.Context(), resourceGroupNameParam, virtualMachineScaleSetNameParam, virtualmachineIndexParam, networkInterfaceNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Interface, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (i *InterfacesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if i.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := i.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkInterfaces` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := i.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + i.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.InterfacesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + i.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + i.newListPager.remove(req) + } + return resp, nil +} + +func (i *InterfacesServerTransport) dispatchNewListAllPager(req *http.Request) (*http.Response, error) { + if i.srv.NewListAllPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAllPager not implemented")} + } + newListAllPager := i.newListAllPager.get(req) + if newListAllPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkInterfaces` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := i.srv.NewListAllPager(nil) + newListAllPager = &resp + i.newListAllPager.add(req, newListAllPager) + server.PagerResponderInjectNextLinks(newListAllPager, req, func(page *armnetwork.InterfacesClientListAllResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAllPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + i.newListAllPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAllPager) { + i.newListAllPager.remove(req) + } + return resp, nil +} + +func (i *InterfacesServerTransport) dispatchNewListCloudServiceNetworkInterfacesPager(req *http.Request) (*http.Response, error) { + if i.srv.NewListCloudServiceNetworkInterfacesPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListCloudServiceNetworkInterfacesPager not implemented")} + } + newListCloudServiceNetworkInterfacesPager := i.newListCloudServiceNetworkInterfacesPager.get(req) + if newListCloudServiceNetworkInterfacesPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkInterfaces` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + resp := i.srv.NewListCloudServiceNetworkInterfacesPager(resourceGroupNameParam, cloudServiceNameParam, nil) + newListCloudServiceNetworkInterfacesPager = &resp + i.newListCloudServiceNetworkInterfacesPager.add(req, newListCloudServiceNetworkInterfacesPager) + server.PagerResponderInjectNextLinks(newListCloudServiceNetworkInterfacesPager, req, func(page *armnetwork.InterfacesClientListCloudServiceNetworkInterfacesResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListCloudServiceNetworkInterfacesPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + i.newListCloudServiceNetworkInterfacesPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListCloudServiceNetworkInterfacesPager) { + i.newListCloudServiceNetworkInterfacesPager.remove(req) + } + return resp, nil +} + +func (i *InterfacesServerTransport) dispatchNewListCloudServiceRoleInstanceNetworkInterfacesPager(req *http.Request) (*http.Response, error) { + if i.srv.NewListCloudServiceRoleInstanceNetworkInterfacesPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListCloudServiceRoleInstanceNetworkInterfacesPager not implemented")} + } + newListCloudServiceRoleInstanceNetworkInterfacesPager := i.newListCloudServiceRoleInstanceNetworkInterfacesPager.get(req) + if newListCloudServiceRoleInstanceNetworkInterfacesPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/roleInstances/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkInterfaces` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + roleInstanceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("roleInstanceName")]) + if err != nil { + return nil, err + } + resp := i.srv.NewListCloudServiceRoleInstanceNetworkInterfacesPager(resourceGroupNameParam, cloudServiceNameParam, roleInstanceNameParam, nil) + newListCloudServiceRoleInstanceNetworkInterfacesPager = &resp + i.newListCloudServiceRoleInstanceNetworkInterfacesPager.add(req, newListCloudServiceRoleInstanceNetworkInterfacesPager) + server.PagerResponderInjectNextLinks(newListCloudServiceRoleInstanceNetworkInterfacesPager, req, func(page *armnetwork.InterfacesClientListCloudServiceRoleInstanceNetworkInterfacesResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListCloudServiceRoleInstanceNetworkInterfacesPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + i.newListCloudServiceRoleInstanceNetworkInterfacesPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListCloudServiceRoleInstanceNetworkInterfacesPager) { + i.newListCloudServiceRoleInstanceNetworkInterfacesPager.remove(req) + } + return resp, nil +} + +func (i *InterfacesServerTransport) dispatchBeginListEffectiveNetworkSecurityGroups(req *http.Request) (*http.Response, error) { + if i.srv.BeginListEffectiveNetworkSecurityGroups == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginListEffectiveNetworkSecurityGroups not implemented")} + } + beginListEffectiveNetworkSecurityGroups := i.beginListEffectiveNetworkSecurityGroups.get(req) + if beginListEffectiveNetworkSecurityGroups == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkInterfaces/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/effectiveNetworkSecurityGroups` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkInterfaceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkInterfaceName")]) + if err != nil { + return nil, err + } + respr, errRespr := i.srv.BeginListEffectiveNetworkSecurityGroups(req.Context(), resourceGroupNameParam, networkInterfaceNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginListEffectiveNetworkSecurityGroups = &respr + i.beginListEffectiveNetworkSecurityGroups.add(req, beginListEffectiveNetworkSecurityGroups) + } + + resp, err := server.PollerResponderNext(beginListEffectiveNetworkSecurityGroups, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + i.beginListEffectiveNetworkSecurityGroups.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginListEffectiveNetworkSecurityGroups) { + i.beginListEffectiveNetworkSecurityGroups.remove(req) + } + + return resp, nil +} + +func (i *InterfacesServerTransport) dispatchNewListVirtualMachineScaleSetIPConfigurationsPager(req *http.Request) (*http.Response, error) { + if i.srv.NewListVirtualMachineScaleSetIPConfigurationsPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListVirtualMachineScaleSetIPConfigurationsPager not implemented")} + } + newListVirtualMachineScaleSetIPConfigurationsPager := i.newListVirtualMachineScaleSetIPConfigurationsPager.get(req) + if newListVirtualMachineScaleSetIPConfigurationsPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkInterfaces/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ipConfigurations` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualMachineScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualMachineScaleSetName")]) + if err != nil { + return nil, err + } + virtualmachineIndexParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualmachineIndex")]) + if err != nil { + return nil, err + } + networkInterfaceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkInterfaceName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.InterfacesClientListVirtualMachineScaleSetIPConfigurationsOptions + if expandParam != nil { + options = &armnetwork.InterfacesClientListVirtualMachineScaleSetIPConfigurationsOptions{ + Expand: expandParam, + } + } + resp := i.srv.NewListVirtualMachineScaleSetIPConfigurationsPager(resourceGroupNameParam, virtualMachineScaleSetNameParam, virtualmachineIndexParam, networkInterfaceNameParam, options) + newListVirtualMachineScaleSetIPConfigurationsPager = &resp + i.newListVirtualMachineScaleSetIPConfigurationsPager.add(req, newListVirtualMachineScaleSetIPConfigurationsPager) + server.PagerResponderInjectNextLinks(newListVirtualMachineScaleSetIPConfigurationsPager, req, func(page *armnetwork.InterfacesClientListVirtualMachineScaleSetIPConfigurationsResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListVirtualMachineScaleSetIPConfigurationsPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + i.newListVirtualMachineScaleSetIPConfigurationsPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListVirtualMachineScaleSetIPConfigurationsPager) { + i.newListVirtualMachineScaleSetIPConfigurationsPager.remove(req) + } + return resp, nil +} + +func (i *InterfacesServerTransport) dispatchNewListVirtualMachineScaleSetNetworkInterfacesPager(req *http.Request) (*http.Response, error) { + if i.srv.NewListVirtualMachineScaleSetNetworkInterfacesPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListVirtualMachineScaleSetNetworkInterfacesPager not implemented")} + } + newListVirtualMachineScaleSetNetworkInterfacesPager := i.newListVirtualMachineScaleSetNetworkInterfacesPager.get(req) + if newListVirtualMachineScaleSetNetworkInterfacesPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkInterfaces` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualMachineScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualMachineScaleSetName")]) + if err != nil { + return nil, err + } + resp := i.srv.NewListVirtualMachineScaleSetNetworkInterfacesPager(resourceGroupNameParam, virtualMachineScaleSetNameParam, nil) + newListVirtualMachineScaleSetNetworkInterfacesPager = &resp + i.newListVirtualMachineScaleSetNetworkInterfacesPager.add(req, newListVirtualMachineScaleSetNetworkInterfacesPager) + server.PagerResponderInjectNextLinks(newListVirtualMachineScaleSetNetworkInterfacesPager, req, func(page *armnetwork.InterfacesClientListVirtualMachineScaleSetNetworkInterfacesResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListVirtualMachineScaleSetNetworkInterfacesPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + i.newListVirtualMachineScaleSetNetworkInterfacesPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListVirtualMachineScaleSetNetworkInterfacesPager) { + i.newListVirtualMachineScaleSetNetworkInterfacesPager.remove(req) + } + return resp, nil +} + +func (i *InterfacesServerTransport) dispatchNewListVirtualMachineScaleSetVMNetworkInterfacesPager(req *http.Request) (*http.Response, error) { + if i.srv.NewListVirtualMachineScaleSetVMNetworkInterfacesPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListVirtualMachineScaleSetVMNetworkInterfacesPager not implemented")} + } + newListVirtualMachineScaleSetVMNetworkInterfacesPager := i.newListVirtualMachineScaleSetVMNetworkInterfacesPager.get(req) + if newListVirtualMachineScaleSetVMNetworkInterfacesPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkInterfaces` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualMachineScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualMachineScaleSetName")]) + if err != nil { + return nil, err + } + virtualmachineIndexParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualmachineIndex")]) + if err != nil { + return nil, err + } + resp := i.srv.NewListVirtualMachineScaleSetVMNetworkInterfacesPager(resourceGroupNameParam, virtualMachineScaleSetNameParam, virtualmachineIndexParam, nil) + newListVirtualMachineScaleSetVMNetworkInterfacesPager = &resp + i.newListVirtualMachineScaleSetVMNetworkInterfacesPager.add(req, newListVirtualMachineScaleSetVMNetworkInterfacesPager) + server.PagerResponderInjectNextLinks(newListVirtualMachineScaleSetVMNetworkInterfacesPager, req, func(page *armnetwork.InterfacesClientListVirtualMachineScaleSetVMNetworkInterfacesResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListVirtualMachineScaleSetVMNetworkInterfacesPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + i.newListVirtualMachineScaleSetVMNetworkInterfacesPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListVirtualMachineScaleSetVMNetworkInterfacesPager) { + i.newListVirtualMachineScaleSetVMNetworkInterfacesPager.remove(req) + } + return resp, nil +} + +func (i *InterfacesServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if i.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkInterfaces/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkInterfaceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkInterfaceName")]) + if err != nil { + return nil, err + } + respr, errRespr := i.srv.UpdateTags(req.Context(), resourceGroupNameParam, networkInterfaceNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Interface, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/interfacetapconfigurations_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/interfacetapconfigurations_server.go new file mode 100644 index 00000000000..f8d407eee35 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/interfacetapconfigurations_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// InterfaceTapConfigurationsServer is a fake server for instances of the armnetwork.InterfaceTapConfigurationsClient type. 
+type InterfaceTapConfigurationsServer struct { + // BeginCreateOrUpdate is the fake for method InterfaceTapConfigurationsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, networkInterfaceName string, tapConfigurationName string, tapConfigurationParameters armnetwork.InterfaceTapConfiguration, options *armnetwork.InterfaceTapConfigurationsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.InterfaceTapConfigurationsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method InterfaceTapConfigurationsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, networkInterfaceName string, tapConfigurationName string, options *armnetwork.InterfaceTapConfigurationsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.InterfaceTapConfigurationsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method InterfaceTapConfigurationsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, networkInterfaceName string, tapConfigurationName string, options *armnetwork.InterfaceTapConfigurationsClientGetOptions) (resp azfake.Responder[armnetwork.InterfaceTapConfigurationsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method InterfaceTapConfigurationsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, networkInterfaceName string, options *armnetwork.InterfaceTapConfigurationsClientListOptions) (resp azfake.PagerResponder[armnetwork.InterfaceTapConfigurationsClientListResponse]) +} + +// NewInterfaceTapConfigurationsServerTransport creates a new instance of InterfaceTapConfigurationsServerTransport with the provided implementation. +// The returned InterfaceTapConfigurationsServerTransport instance is connected to an instance of armnetwork.InterfaceTapConfigurationsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewInterfaceTapConfigurationsServerTransport(srv *InterfaceTapConfigurationsServer) *InterfaceTapConfigurationsServerTransport { + return &InterfaceTapConfigurationsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.InterfaceTapConfigurationsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.InterfaceTapConfigurationsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.InterfaceTapConfigurationsClientListResponse]](), + } +} + +// InterfaceTapConfigurationsServerTransport connects instances of armnetwork.InterfaceTapConfigurationsClient to instances of InterfaceTapConfigurationsServer. +// Don't use this type directly, use NewInterfaceTapConfigurationsServerTransport instead. 
+type InterfaceTapConfigurationsServerTransport struct { + srv *InterfaceTapConfigurationsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.InterfaceTapConfigurationsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.InterfaceTapConfigurationsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.InterfaceTapConfigurationsClientListResponse]] +} + +// Do implements the policy.Transporter interface for InterfaceTapConfigurationsServerTransport. +func (i *InterfaceTapConfigurationsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "InterfaceTapConfigurationsClient.BeginCreateOrUpdate": + resp, err = i.dispatchBeginCreateOrUpdate(req) + case "InterfaceTapConfigurationsClient.BeginDelete": + resp, err = i.dispatchBeginDelete(req) + case "InterfaceTapConfigurationsClient.Get": + resp, err = i.dispatchGet(req) + case "InterfaceTapConfigurationsClient.NewListPager": + resp, err = i.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (i *InterfaceTapConfigurationsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if i.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := i.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkInterfaces/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/tapConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.InterfaceTapConfiguration](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkInterfaceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkInterfaceName")]) + if err != nil { + return nil, err + } + tapConfigurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("tapConfigurationName")]) + if err != nil { + return nil, err + } + respr, errRespr := i.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, networkInterfaceNameParam, tapConfigurationNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + i.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + i.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + i.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (i *InterfaceTapConfigurationsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if i.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := i.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkInterfaces/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/tapConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkInterfaceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkInterfaceName")]) + if err != nil { + return nil, err + } + tapConfigurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("tapConfigurationName")]) + if err != nil { + return nil, err + } + respr, errRespr := i.srv.BeginDelete(req.Context(), resourceGroupNameParam, networkInterfaceNameParam, tapConfigurationNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + i.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + i.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + i.beginDelete.remove(req) + } + + return resp, nil +} + +func (i *InterfaceTapConfigurationsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if i.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkInterfaces/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/tapConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkInterfaceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkInterfaceName")]) + if err != nil { + return nil, err + } + tapConfigurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("tapConfigurationName")]) + if err != nil { + return nil, err + } + respr, errRespr := i.srv.Get(req.Context(), resourceGroupNameParam, networkInterfaceNameParam, tapConfigurationNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).InterfaceTapConfiguration, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (i *InterfaceTapConfigurationsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if i.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := i.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkInterfaces/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/tapConfigurations` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkInterfaceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkInterfaceName")]) + if err != nil { + return nil, err + } + resp := i.srv.NewListPager(resourceGroupNameParam, networkInterfaceNameParam, nil) + newListPager = &resp + i.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.InterfaceTapConfigurationsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + i.newListPager.remove(req) + return 
nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + i.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/internal.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/internal.go new file mode 100644 index 00000000000..e0173df3fae --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/internal.go @@ -0,0 +1,104 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "io" + "net/http" + "reflect" + "sync" +) + +type nonRetriableError struct { + error +} + +func (nonRetriableError) NonRetriable() { + // marker method +} + +func contains[T comparable](s []T, v T) bool { + for _, vv := range s { + if vv == v { + return true + } + } + return false +} + +func getHeaderValue(h http.Header, k string) string { + v := h[k] + if len(v) == 0 { + return "" + } + return v[0] +} + +func getOptional[T any](v T) *T { + if reflect.ValueOf(v).IsZero() { + return nil + } + return &v +} + +func parseOptional[T any](v string, parse func(v string) (T, error)) (*T, error) { + if v == "" { + return nil, nil + } + t, err := parse(v) + if err != nil { + return nil, err + } + return &t, err +} + +func readRequestBody(req *http.Request) ([]byte, error) { + if req.Body == nil { + return nil, nil + } + body, err := io.ReadAll(req.Body) + if err != nil { + return nil, err + } + req.Body.Close() + return body, nil +} + +func newTracker[T any]() *tracker[T] { + return &tracker[T]{ + items: map[string]*T{}, + } +} + +type tracker[T any] struct { + items map[string]*T + mu sync.Mutex +} + +func (p *tracker[T]) get(req *http.Request) *T { + p.mu.Lock() + defer p.mu.Unlock() + if item, ok := p.items[server.SanitizePagerPollerPath(req.URL.Path)]; ok { + return item + } + return nil +} + +func (p *tracker[T]) add(req *http.Request, item *T) { + p.mu.Lock() + defer p.mu.Unlock() + p.items[server.SanitizePagerPollerPath(req.URL.Path)] = item +} + +func (p *tracker[T]) remove(req *http.Request) { + p.mu.Lock() + defer p.mu.Unlock() + delete(p.items, server.SanitizePagerPollerPath(req.URL.Path)) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/ipallocations_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/ipallocations_server.go new file mode 100644 index 00000000000..8515ff9aaba --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/ipallocations_server.go @@ -0,0 +1,352 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// IPAllocationsServer is a fake server for instances of the armnetwork.IPAllocationsClient type. +type IPAllocationsServer struct { + // BeginCreateOrUpdate is the fake for method IPAllocationsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, ipAllocationName string, parameters armnetwork.IPAllocation, options *armnetwork.IPAllocationsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.IPAllocationsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method IPAllocationsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, ipAllocationName string, options *armnetwork.IPAllocationsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.IPAllocationsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method IPAllocationsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, ipAllocationName string, options *armnetwork.IPAllocationsClientGetOptions) (resp azfake.Responder[armnetwork.IPAllocationsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method IPAllocationsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armnetwork.IPAllocationsClientListOptions) (resp azfake.PagerResponder[armnetwork.IPAllocationsClientListResponse]) + + // NewListByResourceGroupPager is the fake for method IPAllocationsClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armnetwork.IPAllocationsClientListByResourceGroupOptions) (resp azfake.PagerResponder[armnetwork.IPAllocationsClientListByResourceGroupResponse]) + + // UpdateTags is the fake for method IPAllocationsClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, ipAllocationName string, parameters armnetwork.TagsObject, options *armnetwork.IPAllocationsClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.IPAllocationsClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewIPAllocationsServerTransport creates a new instance of IPAllocationsServerTransport with the provided implementation. +// The returned IPAllocationsServerTransport instance is connected to an instance of armnetwork.IPAllocationsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
+func NewIPAllocationsServerTransport(srv *IPAllocationsServer) *IPAllocationsServerTransport { + return &IPAllocationsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.IPAllocationsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.IPAllocationsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.IPAllocationsClientListResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armnetwork.IPAllocationsClientListByResourceGroupResponse]](), + } +} + +// IPAllocationsServerTransport connects instances of armnetwork.IPAllocationsClient to instances of IPAllocationsServer. +// Don't use this type directly, use NewIPAllocationsServerTransport instead. +type IPAllocationsServerTransport struct { + srv *IPAllocationsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.IPAllocationsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.IPAllocationsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.IPAllocationsClientListResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armnetwork.IPAllocationsClientListByResourceGroupResponse]] +} + +// Do implements the policy.Transporter interface for IPAllocationsServerTransport. +func (i *IPAllocationsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "IPAllocationsClient.BeginCreateOrUpdate": + resp, err = i.dispatchBeginCreateOrUpdate(req) + case "IPAllocationsClient.BeginDelete": + resp, err = i.dispatchBeginDelete(req) + case "IPAllocationsClient.Get": + resp, err = i.dispatchGet(req) + case "IPAllocationsClient.NewListPager": + resp, err = i.dispatchNewListPager(req) + case "IPAllocationsClient.NewListByResourceGroupPager": + resp, err = i.dispatchNewListByResourceGroupPager(req) + case "IPAllocationsClient.UpdateTags": + resp, err = i.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (i *IPAllocationsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if i.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := i.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/IpAllocations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.IPAllocation](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + ipAllocationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ipAllocationName")]) + if err != nil { + return nil, err + } + 
respr, errRespr := i.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, ipAllocationNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + i.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + i.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + i.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (i *IPAllocationsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if i.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := i.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/IpAllocations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + ipAllocationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ipAllocationName")]) + if err != nil { + return nil, err + } + respr, errRespr := i.srv.BeginDelete(req.Context(), resourceGroupNameParam, ipAllocationNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + i.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + i.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + i.beginDelete.remove(req) + } + + return resp, nil +} + +func (i *IPAllocationsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if i.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/IpAllocations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + ipAllocationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ipAllocationName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.IPAllocationsClientGetOptions + if expandParam != nil { + options = &armnetwork.IPAllocationsClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := i.srv.Get(req.Context(), resourceGroupNameParam, ipAllocationNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).IPAllocation, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (i *IPAllocationsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if i.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := i.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/IpAllocations` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := i.srv.NewListPager(nil) + newListPager = &resp + i.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.IPAllocationsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + i.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + i.newListPager.remove(req) + } + return resp, nil +} + +func (i *IPAllocationsServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if i.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := i.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/IpAllocations` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := i.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + i.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armnetwork.IPAllocationsClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + i.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + i.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (i *IPAllocationsServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if i.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/IpAllocations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + ipAllocationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ipAllocationName")]) + if err != nil { + return nil, err + } + respr, errRespr := i.srv.UpdateTags(req.Context(), resourceGroupNameParam, ipAllocationNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).IPAllocation, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/ipgroups_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/ipgroups_server.go new file mode 100644 index 00000000000..6c1a4dd330a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/ipgroups_server.go @@ -0,0 +1,352 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// IPGroupsServer is a fake server for instances of the armnetwork.IPGroupsClient type. +type IPGroupsServer struct { + // BeginCreateOrUpdate is the fake for method IPGroupsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, ipGroupsName string, parameters armnetwork.IPGroup, options *armnetwork.IPGroupsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.IPGroupsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method IPGroupsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, ipGroupsName string, options *armnetwork.IPGroupsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.IPGroupsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method IPGroupsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, ipGroupsName string, options *armnetwork.IPGroupsClientGetOptions) (resp azfake.Responder[armnetwork.IPGroupsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method IPGroupsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armnetwork.IPGroupsClientListOptions) (resp azfake.PagerResponder[armnetwork.IPGroupsClientListResponse]) + + // NewListByResourceGroupPager is the fake for method IPGroupsClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armnetwork.IPGroupsClientListByResourceGroupOptions) (resp azfake.PagerResponder[armnetwork.IPGroupsClientListByResourceGroupResponse]) + + // UpdateGroups is the fake for method IPGroupsClient.UpdateGroups + // HTTP status codes to indicate success: http.StatusOK + UpdateGroups func(ctx context.Context, 
resourceGroupName string, ipGroupsName string, parameters armnetwork.TagsObject, options *armnetwork.IPGroupsClientUpdateGroupsOptions) (resp azfake.Responder[armnetwork.IPGroupsClientUpdateGroupsResponse], errResp azfake.ErrorResponder) +} + +// NewIPGroupsServerTransport creates a new instance of IPGroupsServerTransport with the provided implementation. +// The returned IPGroupsServerTransport instance is connected to an instance of armnetwork.IPGroupsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewIPGroupsServerTransport(srv *IPGroupsServer) *IPGroupsServerTransport { + return &IPGroupsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.IPGroupsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.IPGroupsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.IPGroupsClientListResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armnetwork.IPGroupsClientListByResourceGroupResponse]](), + } +} + +// IPGroupsServerTransport connects instances of armnetwork.IPGroupsClient to instances of IPGroupsServer. +// Don't use this type directly, use NewIPGroupsServerTransport instead. +type IPGroupsServerTransport struct { + srv *IPGroupsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.IPGroupsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.IPGroupsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.IPGroupsClientListResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armnetwork.IPGroupsClientListByResourceGroupResponse]] +} + +// Do implements the policy.Transporter interface for IPGroupsServerTransport. 
+func (i *IPGroupsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "IPGroupsClient.BeginCreateOrUpdate": + resp, err = i.dispatchBeginCreateOrUpdate(req) + case "IPGroupsClient.BeginDelete": + resp, err = i.dispatchBeginDelete(req) + case "IPGroupsClient.Get": + resp, err = i.dispatchGet(req) + case "IPGroupsClient.NewListPager": + resp, err = i.dispatchNewListPager(req) + case "IPGroupsClient.NewListByResourceGroupPager": + resp, err = i.dispatchNewListByResourceGroupPager(req) + case "IPGroupsClient.UpdateGroups": + resp, err = i.dispatchUpdateGroups(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (i *IPGroupsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if i.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := i.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ipGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.IPGroup](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + ipGroupsNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ipGroupsName")]) + if err != nil { + return nil, err + } + respr, errRespr := i.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, ipGroupsNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + i.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + i.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + i.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (i *IPGroupsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if i.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := i.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ipGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + ipGroupsNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ipGroupsName")]) + if err != nil { + return nil, err + } + respr, errRespr := i.srv.BeginDelete(req.Context(), resourceGroupNameParam, ipGroupsNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + i.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + i.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + i.beginDelete.remove(req) + } + + return resp, nil +} + +func (i *IPGroupsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if i.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ipGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + ipGroupsNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ipGroupsName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.IPGroupsClientGetOptions + if expandParam != nil { + options = &armnetwork.IPGroupsClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := i.srv.Get(req.Context(), resourceGroupNameParam, ipGroupsNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).IPGroup, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (i *IPGroupsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if i.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := i.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ipGroups` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := i.srv.NewListPager(nil) + newListPager = &resp + i.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.IPGroupsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + i.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + i.newListPager.remove(req) + } + return resp, nil +} + +func (i *IPGroupsServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if i.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := i.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ipGroups` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := i.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + i.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armnetwork.IPGroupsClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + i.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + i.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (i *IPGroupsServerTransport) dispatchUpdateGroups(req *http.Request) (*http.Response, error) { + if i.srv.UpdateGroups == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateGroups not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ipGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + ipGroupsNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ipGroupsName")]) + if err != nil { + return nil, err + } + respr, errRespr := i.srv.UpdateGroups(req.Context(), resourceGroupNameParam, ipGroupsNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).IPGroup, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/loadbalancerbackendaddresspools_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/loadbalancerbackendaddresspools_server.go new file mode 100644 index 00000000000..cfff701ccd0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/loadbalancerbackendaddresspools_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// LoadBalancerBackendAddressPoolsServer is a fake server for instances of the armnetwork.LoadBalancerBackendAddressPoolsClient type. 
+type LoadBalancerBackendAddressPoolsServer struct { + // BeginCreateOrUpdate is the fake for method LoadBalancerBackendAddressPoolsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, loadBalancerName string, backendAddressPoolName string, parameters armnetwork.BackendAddressPool, options *armnetwork.LoadBalancerBackendAddressPoolsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.LoadBalancerBackendAddressPoolsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method LoadBalancerBackendAddressPoolsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, loadBalancerName string, backendAddressPoolName string, options *armnetwork.LoadBalancerBackendAddressPoolsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.LoadBalancerBackendAddressPoolsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method LoadBalancerBackendAddressPoolsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, loadBalancerName string, backendAddressPoolName string, options *armnetwork.LoadBalancerBackendAddressPoolsClientGetOptions) (resp azfake.Responder[armnetwork.LoadBalancerBackendAddressPoolsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method LoadBalancerBackendAddressPoolsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, loadBalancerName string, options *armnetwork.LoadBalancerBackendAddressPoolsClientListOptions) (resp azfake.PagerResponder[armnetwork.LoadBalancerBackendAddressPoolsClientListResponse]) +} + +// NewLoadBalancerBackendAddressPoolsServerTransport creates a new instance of LoadBalancerBackendAddressPoolsServerTransport with the provided implementation. +// The returned LoadBalancerBackendAddressPoolsServerTransport instance is connected to an instance of armnetwork.LoadBalancerBackendAddressPoolsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewLoadBalancerBackendAddressPoolsServerTransport(srv *LoadBalancerBackendAddressPoolsServer) *LoadBalancerBackendAddressPoolsServerTransport { + return &LoadBalancerBackendAddressPoolsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.LoadBalancerBackendAddressPoolsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.LoadBalancerBackendAddressPoolsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.LoadBalancerBackendAddressPoolsClientListResponse]](), + } +} + +// LoadBalancerBackendAddressPoolsServerTransport connects instances of armnetwork.LoadBalancerBackendAddressPoolsClient to instances of LoadBalancerBackendAddressPoolsServer. +// Don't use this type directly, use NewLoadBalancerBackendAddressPoolsServerTransport instead. 
+type LoadBalancerBackendAddressPoolsServerTransport struct { + srv *LoadBalancerBackendAddressPoolsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.LoadBalancerBackendAddressPoolsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.LoadBalancerBackendAddressPoolsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.LoadBalancerBackendAddressPoolsClientListResponse]] +} + +// Do implements the policy.Transporter interface for LoadBalancerBackendAddressPoolsServerTransport. +func (l *LoadBalancerBackendAddressPoolsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "LoadBalancerBackendAddressPoolsClient.BeginCreateOrUpdate": + resp, err = l.dispatchBeginCreateOrUpdate(req) + case "LoadBalancerBackendAddressPoolsClient.BeginDelete": + resp, err = l.dispatchBeginDelete(req) + case "LoadBalancerBackendAddressPoolsClient.Get": + resp, err = l.dispatchGet(req) + case "LoadBalancerBackendAddressPoolsClient.NewListPager": + resp, err = l.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (l *LoadBalancerBackendAddressPoolsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if l.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := l.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/backendAddressPools/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.BackendAddressPool](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + loadBalancerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("loadBalancerName")]) + if err != nil { + return nil, err + } + backendAddressPoolNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("backendAddressPoolName")]) + if err != nil { + return nil, err + } + respr, errRespr := l.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, loadBalancerNameParam, backendAddressPoolNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + l.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + l.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + l.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (l *LoadBalancerBackendAddressPoolsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if l.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := l.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/backendAddressPools/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + loadBalancerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("loadBalancerName")]) + if err != nil { + return nil, err + } + backendAddressPoolNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("backendAddressPoolName")]) + if err != nil { + return nil, err + } + respr, errRespr := l.srv.BeginDelete(req.Context(), resourceGroupNameParam, loadBalancerNameParam, backendAddressPoolNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + l.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + l.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + l.beginDelete.remove(req) + } + + return resp, nil +} + +func (l *LoadBalancerBackendAddressPoolsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if l.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/backendAddressPools/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + loadBalancerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("loadBalancerName")]) + if err != nil { + return nil, err + } + backendAddressPoolNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("backendAddressPoolName")]) + if err != nil { + return nil, err + } + respr, errRespr := l.srv.Get(req.Context(), resourceGroupNameParam, loadBalancerNameParam, backendAddressPoolNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).BackendAddressPool, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (l *LoadBalancerBackendAddressPoolsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if l.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := l.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/backendAddressPools` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + loadBalancerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("loadBalancerName")]) + if err != nil { + return nil, err + } + resp := l.srv.NewListPager(resourceGroupNameParam, loadBalancerNameParam, nil) + newListPager = &resp + l.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.LoadBalancerBackendAddressPoolsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + l.newListPager.remove(req) + return nil, 
&nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + l.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/loadbalancerfrontendipconfigurations_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/loadbalancerfrontendipconfigurations_server.go new file mode 100644 index 00000000000..356d59170bb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/loadbalancerfrontendipconfigurations_server.go @@ -0,0 +1,156 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// LoadBalancerFrontendIPConfigurationsServer is a fake server for instances of the armnetwork.LoadBalancerFrontendIPConfigurationsClient type. +type LoadBalancerFrontendIPConfigurationsServer struct { + // Get is the fake for method LoadBalancerFrontendIPConfigurationsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, loadBalancerName string, frontendIPConfigurationName string, options *armnetwork.LoadBalancerFrontendIPConfigurationsClientGetOptions) (resp azfake.Responder[armnetwork.LoadBalancerFrontendIPConfigurationsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method LoadBalancerFrontendIPConfigurationsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, loadBalancerName string, options *armnetwork.LoadBalancerFrontendIPConfigurationsClientListOptions) (resp azfake.PagerResponder[armnetwork.LoadBalancerFrontendIPConfigurationsClientListResponse]) +} + +// NewLoadBalancerFrontendIPConfigurationsServerTransport creates a new instance of LoadBalancerFrontendIPConfigurationsServerTransport with the provided implementation. +// The returned LoadBalancerFrontendIPConfigurationsServerTransport instance is connected to an instance of armnetwork.LoadBalancerFrontendIPConfigurationsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewLoadBalancerFrontendIPConfigurationsServerTransport(srv *LoadBalancerFrontendIPConfigurationsServer) *LoadBalancerFrontendIPConfigurationsServerTransport { + return &LoadBalancerFrontendIPConfigurationsServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.LoadBalancerFrontendIPConfigurationsClientListResponse]](), + } +} + +// LoadBalancerFrontendIPConfigurationsServerTransport connects instances of armnetwork.LoadBalancerFrontendIPConfigurationsClient to instances of LoadBalancerFrontendIPConfigurationsServer. 
+// Don't use this type directly, use NewLoadBalancerFrontendIPConfigurationsServerTransport instead. +type LoadBalancerFrontendIPConfigurationsServerTransport struct { + srv *LoadBalancerFrontendIPConfigurationsServer + newListPager *tracker[azfake.PagerResponder[armnetwork.LoadBalancerFrontendIPConfigurationsClientListResponse]] +} + +// Do implements the policy.Transporter interface for LoadBalancerFrontendIPConfigurationsServerTransport. +func (l *LoadBalancerFrontendIPConfigurationsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "LoadBalancerFrontendIPConfigurationsClient.Get": + resp, err = l.dispatchGet(req) + case "LoadBalancerFrontendIPConfigurationsClient.NewListPager": + resp, err = l.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (l *LoadBalancerFrontendIPConfigurationsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if l.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/frontendIPConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + loadBalancerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("loadBalancerName")]) + if err != nil { + return nil, err + } + frontendIPConfigurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("frontendIPConfigurationName")]) + if err != nil { + return nil, err + } + respr, errRespr := l.srv.Get(req.Context(), resourceGroupNameParam, loadBalancerNameParam, frontendIPConfigurationNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).FrontendIPConfiguration, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (l *LoadBalancerFrontendIPConfigurationsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if l.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := l.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/frontendIPConfigurations` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + loadBalancerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("loadBalancerName")]) + if err != nil { + return nil, err + } + resp := l.srv.NewListPager(resourceGroupNameParam, loadBalancerNameParam, nil) + newListPager = &resp + l.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.LoadBalancerFrontendIPConfigurationsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + l.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + l.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/loadbalancerloadbalancingrules_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/loadbalancerloadbalancingrules_server.go new file mode 100644 index 00000000000..518b85fc335 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/loadbalancerloadbalancingrules_server.go @@ -0,0 +1,156 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// LoadBalancerLoadBalancingRulesServer is a fake server for instances of the armnetwork.LoadBalancerLoadBalancingRulesClient type. 
+type LoadBalancerLoadBalancingRulesServer struct { + // Get is the fake for method LoadBalancerLoadBalancingRulesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, loadBalancerName string, loadBalancingRuleName string, options *armnetwork.LoadBalancerLoadBalancingRulesClientGetOptions) (resp azfake.Responder[armnetwork.LoadBalancerLoadBalancingRulesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method LoadBalancerLoadBalancingRulesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, loadBalancerName string, options *armnetwork.LoadBalancerLoadBalancingRulesClientListOptions) (resp azfake.PagerResponder[armnetwork.LoadBalancerLoadBalancingRulesClientListResponse]) +} + +// NewLoadBalancerLoadBalancingRulesServerTransport creates a new instance of LoadBalancerLoadBalancingRulesServerTransport with the provided implementation. +// The returned LoadBalancerLoadBalancingRulesServerTransport instance is connected to an instance of armnetwork.LoadBalancerLoadBalancingRulesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewLoadBalancerLoadBalancingRulesServerTransport(srv *LoadBalancerLoadBalancingRulesServer) *LoadBalancerLoadBalancingRulesServerTransport { + return &LoadBalancerLoadBalancingRulesServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.LoadBalancerLoadBalancingRulesClientListResponse]](), + } +} + +// LoadBalancerLoadBalancingRulesServerTransport connects instances of armnetwork.LoadBalancerLoadBalancingRulesClient to instances of LoadBalancerLoadBalancingRulesServer. +// Don't use this type directly, use NewLoadBalancerLoadBalancingRulesServerTransport instead. +type LoadBalancerLoadBalancingRulesServerTransport struct { + srv *LoadBalancerLoadBalancingRulesServer + newListPager *tracker[azfake.PagerResponder[armnetwork.LoadBalancerLoadBalancingRulesClientListResponse]] +} + +// Do implements the policy.Transporter interface for LoadBalancerLoadBalancingRulesServerTransport. 
+func (l *LoadBalancerLoadBalancingRulesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "LoadBalancerLoadBalancingRulesClient.Get": + resp, err = l.dispatchGet(req) + case "LoadBalancerLoadBalancingRulesClient.NewListPager": + resp, err = l.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (l *LoadBalancerLoadBalancingRulesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if l.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/loadBalancingRules/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + loadBalancerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("loadBalancerName")]) + if err != nil { + return nil, err + } + loadBalancingRuleNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("loadBalancingRuleName")]) + if err != nil { + return nil, err + } + respr, errRespr := l.srv.Get(req.Context(), resourceGroupNameParam, loadBalancerNameParam, loadBalancingRuleNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).LoadBalancingRule, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (l *LoadBalancerLoadBalancingRulesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if l.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := l.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/loadBalancingRules` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + loadBalancerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("loadBalancerName")]) + if err != nil { + return nil, err + } + resp := l.srv.NewListPager(resourceGroupNameParam, loadBalancerNameParam, nil) + newListPager = &resp + l.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.LoadBalancerLoadBalancingRulesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + l.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + l.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/loadbalancernetworkinterfaces_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/loadbalancernetworkinterfaces_server.go new file mode 100644 index 00000000000..693ff0b99ea --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/loadbalancernetworkinterfaces_server.go @@ -0,0 +1,112 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// LoadBalancerNetworkInterfacesServer is a fake server for instances of the armnetwork.LoadBalancerNetworkInterfacesClient type. 
+type LoadBalancerNetworkInterfacesServer struct { + // NewListPager is the fake for method LoadBalancerNetworkInterfacesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, loadBalancerName string, options *armnetwork.LoadBalancerNetworkInterfacesClientListOptions) (resp azfake.PagerResponder[armnetwork.LoadBalancerNetworkInterfacesClientListResponse]) +} + +// NewLoadBalancerNetworkInterfacesServerTransport creates a new instance of LoadBalancerNetworkInterfacesServerTransport with the provided implementation. +// The returned LoadBalancerNetworkInterfacesServerTransport instance is connected to an instance of armnetwork.LoadBalancerNetworkInterfacesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewLoadBalancerNetworkInterfacesServerTransport(srv *LoadBalancerNetworkInterfacesServer) *LoadBalancerNetworkInterfacesServerTransport { + return &LoadBalancerNetworkInterfacesServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.LoadBalancerNetworkInterfacesClientListResponse]](), + } +} + +// LoadBalancerNetworkInterfacesServerTransport connects instances of armnetwork.LoadBalancerNetworkInterfacesClient to instances of LoadBalancerNetworkInterfacesServer. +// Don't use this type directly, use NewLoadBalancerNetworkInterfacesServerTransport instead. +type LoadBalancerNetworkInterfacesServerTransport struct { + srv *LoadBalancerNetworkInterfacesServer + newListPager *tracker[azfake.PagerResponder[armnetwork.LoadBalancerNetworkInterfacesClientListResponse]] +} + +// Do implements the policy.Transporter interface for LoadBalancerNetworkInterfacesServerTransport. +func (l *LoadBalancerNetworkInterfacesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "LoadBalancerNetworkInterfacesClient.NewListPager": + resp, err = l.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (l *LoadBalancerNetworkInterfacesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if l.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := l.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkInterfaces` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + loadBalancerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("loadBalancerName")]) + if err != nil { + return nil, err + } + resp := l.srv.NewListPager(resourceGroupNameParam, loadBalancerNameParam, nil) + newListPager = &resp + l.newListPager.add(req, newListPager) + 
server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.LoadBalancerNetworkInterfacesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + l.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + l.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/loadbalanceroutboundrules_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/loadbalanceroutboundrules_server.go new file mode 100644 index 00000000000..d38864d9688 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/loadbalanceroutboundrules_server.go @@ -0,0 +1,156 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// LoadBalancerOutboundRulesServer is a fake server for instances of the armnetwork.LoadBalancerOutboundRulesClient type. +type LoadBalancerOutboundRulesServer struct { + // Get is the fake for method LoadBalancerOutboundRulesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, loadBalancerName string, outboundRuleName string, options *armnetwork.LoadBalancerOutboundRulesClientGetOptions) (resp azfake.Responder[armnetwork.LoadBalancerOutboundRulesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method LoadBalancerOutboundRulesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, loadBalancerName string, options *armnetwork.LoadBalancerOutboundRulesClientListOptions) (resp azfake.PagerResponder[armnetwork.LoadBalancerOutboundRulesClientListResponse]) +} + +// NewLoadBalancerOutboundRulesServerTransport creates a new instance of LoadBalancerOutboundRulesServerTransport with the provided implementation. +// The returned LoadBalancerOutboundRulesServerTransport instance is connected to an instance of armnetwork.LoadBalancerOutboundRulesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
+func NewLoadBalancerOutboundRulesServerTransport(srv *LoadBalancerOutboundRulesServer) *LoadBalancerOutboundRulesServerTransport { + return &LoadBalancerOutboundRulesServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.LoadBalancerOutboundRulesClientListResponse]](), + } +} + +// LoadBalancerOutboundRulesServerTransport connects instances of armnetwork.LoadBalancerOutboundRulesClient to instances of LoadBalancerOutboundRulesServer. +// Don't use this type directly, use NewLoadBalancerOutboundRulesServerTransport instead. +type LoadBalancerOutboundRulesServerTransport struct { + srv *LoadBalancerOutboundRulesServer + newListPager *tracker[azfake.PagerResponder[armnetwork.LoadBalancerOutboundRulesClientListResponse]] +} + +// Do implements the policy.Transporter interface for LoadBalancerOutboundRulesServerTransport. +func (l *LoadBalancerOutboundRulesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "LoadBalancerOutboundRulesClient.Get": + resp, err = l.dispatchGet(req) + case "LoadBalancerOutboundRulesClient.NewListPager": + resp, err = l.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (l *LoadBalancerOutboundRulesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if l.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/outboundRules/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + loadBalancerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("loadBalancerName")]) + if err != nil { + return nil, err + } + outboundRuleNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("outboundRuleName")]) + if err != nil { + return nil, err + } + respr, errRespr := l.srv.Get(req.Context(), resourceGroupNameParam, loadBalancerNameParam, outboundRuleNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).OutboundRule, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (l *LoadBalancerOutboundRulesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if l.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := l.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/outboundRules` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + loadBalancerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("loadBalancerName")]) + if err != nil { + return nil, err + } + resp := l.srv.NewListPager(resourceGroupNameParam, loadBalancerNameParam, nil) + newListPager = &resp + l.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.LoadBalancerOutboundRulesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + l.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + l.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/loadbalancerprobes_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/loadbalancerprobes_server.go new file mode 100644 index 00000000000..040097ff24d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/loadbalancerprobes_server.go @@ -0,0 +1,156 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// LoadBalancerProbesServer is a fake server for instances of the armnetwork.LoadBalancerProbesClient type. 
+type LoadBalancerProbesServer struct { + // Get is the fake for method LoadBalancerProbesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, loadBalancerName string, probeName string, options *armnetwork.LoadBalancerProbesClientGetOptions) (resp azfake.Responder[armnetwork.LoadBalancerProbesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method LoadBalancerProbesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, loadBalancerName string, options *armnetwork.LoadBalancerProbesClientListOptions) (resp azfake.PagerResponder[armnetwork.LoadBalancerProbesClientListResponse]) +} + +// NewLoadBalancerProbesServerTransport creates a new instance of LoadBalancerProbesServerTransport with the provided implementation. +// The returned LoadBalancerProbesServerTransport instance is connected to an instance of armnetwork.LoadBalancerProbesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewLoadBalancerProbesServerTransport(srv *LoadBalancerProbesServer) *LoadBalancerProbesServerTransport { + return &LoadBalancerProbesServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.LoadBalancerProbesClientListResponse]](), + } +} + +// LoadBalancerProbesServerTransport connects instances of armnetwork.LoadBalancerProbesClient to instances of LoadBalancerProbesServer. +// Don't use this type directly, use NewLoadBalancerProbesServerTransport instead. +type LoadBalancerProbesServerTransport struct { + srv *LoadBalancerProbesServer + newListPager *tracker[azfake.PagerResponder[armnetwork.LoadBalancerProbesClientListResponse]] +} + +// Do implements the policy.Transporter interface for LoadBalancerProbesServerTransport. 
+func (l *LoadBalancerProbesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "LoadBalancerProbesClient.Get": + resp, err = l.dispatchGet(req) + case "LoadBalancerProbesClient.NewListPager": + resp, err = l.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (l *LoadBalancerProbesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if l.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/probes/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + loadBalancerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("loadBalancerName")]) + if err != nil { + return nil, err + } + probeNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("probeName")]) + if err != nil { + return nil, err + } + respr, errRespr := l.srv.Get(req.Context(), resourceGroupNameParam, loadBalancerNameParam, probeNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Probe, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (l *LoadBalancerProbesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if l.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := l.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/probes` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + loadBalancerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("loadBalancerName")]) + if err != nil { + return nil, err + } + resp := l.srv.NewListPager(resourceGroupNameParam, loadBalancerNameParam, nil) + newListPager = &resp + l.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.LoadBalancerProbesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + l.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + l.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/loadbalancers_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/loadbalancers_server.go new file mode 100644 index 00000000000..c0609148837 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/loadbalancers_server.go @@ -0,0 +1,514 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "reflect" + "regexp" +) + +// LoadBalancersServer is a fake server for instances of the armnetwork.LoadBalancersClient type. 
+type LoadBalancersServer struct { + // BeginCreateOrUpdate is the fake for method LoadBalancersClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, loadBalancerName string, parameters armnetwork.LoadBalancer, options *armnetwork.LoadBalancersClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.LoadBalancersClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method LoadBalancersClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, loadBalancerName string, options *armnetwork.LoadBalancersClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.LoadBalancersClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method LoadBalancersClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, loadBalancerName string, options *armnetwork.LoadBalancersClientGetOptions) (resp azfake.Responder[armnetwork.LoadBalancersClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method LoadBalancersClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armnetwork.LoadBalancersClientListOptions) (resp azfake.PagerResponder[armnetwork.LoadBalancersClientListResponse]) + + // NewListAllPager is the fake for method LoadBalancersClient.NewListAllPager + // HTTP status codes to indicate success: http.StatusOK + NewListAllPager func(options *armnetwork.LoadBalancersClientListAllOptions) (resp azfake.PagerResponder[armnetwork.LoadBalancersClientListAllResponse]) + + // BeginListInboundNatRulePortMappings is the fake for method LoadBalancersClient.BeginListInboundNatRulePortMappings + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginListInboundNatRulePortMappings func(ctx context.Context, groupName string, loadBalancerName string, backendPoolName string, parameters armnetwork.QueryInboundNatRulePortMappingRequest, options *armnetwork.LoadBalancersClientBeginListInboundNatRulePortMappingsOptions) (resp azfake.PollerResponder[armnetwork.LoadBalancersClientListInboundNatRulePortMappingsResponse], errResp azfake.ErrorResponder) + + // MigrateToIPBased is the fake for method LoadBalancersClient.MigrateToIPBased + // HTTP status codes to indicate success: http.StatusOK + MigrateToIPBased func(ctx context.Context, groupName string, loadBalancerName string, options *armnetwork.LoadBalancersClientMigrateToIPBasedOptions) (resp azfake.Responder[armnetwork.LoadBalancersClientMigrateToIPBasedResponse], errResp azfake.ErrorResponder) + + // BeginSwapPublicIPAddresses is the fake for method LoadBalancersClient.BeginSwapPublicIPAddresses + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginSwapPublicIPAddresses func(ctx context.Context, location string, parameters armnetwork.LoadBalancerVipSwapRequest, options *armnetwork.LoadBalancersClientBeginSwapPublicIPAddressesOptions) (resp azfake.PollerResponder[armnetwork.LoadBalancersClientSwapPublicIPAddressesResponse], errResp azfake.ErrorResponder) + + // UpdateTags is the fake for method LoadBalancersClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags 
func(ctx context.Context, resourceGroupName string, loadBalancerName string, parameters armnetwork.TagsObject, options *armnetwork.LoadBalancersClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.LoadBalancersClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewLoadBalancersServerTransport creates a new instance of LoadBalancersServerTransport with the provided implementation. +// The returned LoadBalancersServerTransport instance is connected to an instance of armnetwork.LoadBalancersClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewLoadBalancersServerTransport(srv *LoadBalancersServer) *LoadBalancersServerTransport { + return &LoadBalancersServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.LoadBalancersClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.LoadBalancersClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.LoadBalancersClientListResponse]](), + newListAllPager: newTracker[azfake.PagerResponder[armnetwork.LoadBalancersClientListAllResponse]](), + beginListInboundNatRulePortMappings: newTracker[azfake.PollerResponder[armnetwork.LoadBalancersClientListInboundNatRulePortMappingsResponse]](), + beginSwapPublicIPAddresses: newTracker[azfake.PollerResponder[armnetwork.LoadBalancersClientSwapPublicIPAddressesResponse]](), + } +} + +// LoadBalancersServerTransport connects instances of armnetwork.LoadBalancersClient to instances of LoadBalancersServer. +// Don't use this type directly, use NewLoadBalancersServerTransport instead. +type LoadBalancersServerTransport struct { + srv *LoadBalancersServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.LoadBalancersClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.LoadBalancersClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.LoadBalancersClientListResponse]] + newListAllPager *tracker[azfake.PagerResponder[armnetwork.LoadBalancersClientListAllResponse]] + beginListInboundNatRulePortMappings *tracker[azfake.PollerResponder[armnetwork.LoadBalancersClientListInboundNatRulePortMappingsResponse]] + beginSwapPublicIPAddresses *tracker[azfake.PollerResponder[armnetwork.LoadBalancersClientSwapPublicIPAddressesResponse]] +} + +// Do implements the policy.Transporter interface for LoadBalancersServerTransport. 
+func (l *LoadBalancersServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "LoadBalancersClient.BeginCreateOrUpdate": + resp, err = l.dispatchBeginCreateOrUpdate(req) + case "LoadBalancersClient.BeginDelete": + resp, err = l.dispatchBeginDelete(req) + case "LoadBalancersClient.Get": + resp, err = l.dispatchGet(req) + case "LoadBalancersClient.NewListPager": + resp, err = l.dispatchNewListPager(req) + case "LoadBalancersClient.NewListAllPager": + resp, err = l.dispatchNewListAllPager(req) + case "LoadBalancersClient.BeginListInboundNatRulePortMappings": + resp, err = l.dispatchBeginListInboundNatRulePortMappings(req) + case "LoadBalancersClient.MigrateToIPBased": + resp, err = l.dispatchMigrateToIPBased(req) + case "LoadBalancersClient.BeginSwapPublicIPAddresses": + resp, err = l.dispatchBeginSwapPublicIPAddresses(req) + case "LoadBalancersClient.UpdateTags": + resp, err = l.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (l *LoadBalancersServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if l.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := l.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.LoadBalancer](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + loadBalancerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("loadBalancerName")]) + if err != nil { + return nil, err + } + respr, errRespr := l.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, loadBalancerNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + l.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + l.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + l.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (l *LoadBalancersServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if l.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := l.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + loadBalancerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("loadBalancerName")]) + if err != nil { + return nil, err + } + respr, errRespr := l.srv.BeginDelete(req.Context(), resourceGroupNameParam, loadBalancerNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + l.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + l.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + l.beginDelete.remove(req) + } + + return resp, nil +} + +func (l *LoadBalancersServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if l.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + loadBalancerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("loadBalancerName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.LoadBalancersClientGetOptions + if expandParam != nil { + options = &armnetwork.LoadBalancersClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := l.srv.Get(req.Context(), resourceGroupNameParam, loadBalancerNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, 
&nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).LoadBalancer, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (l *LoadBalancersServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if l.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := l.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := l.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + l.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.LoadBalancersClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + l.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + l.newListPager.remove(req) + } + return resp, nil +} + +func (l *LoadBalancersServerTransport) dispatchNewListAllPager(req *http.Request) (*http.Response, error) { + if l.srv.NewListAllPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAllPager not implemented")} + } + newListAllPager := l.newListAllPager.get(req) + if newListAllPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := l.srv.NewListAllPager(nil) + newListAllPager = &resp + l.newListAllPager.add(req, newListAllPager) + server.PagerResponderInjectNextLinks(newListAllPager, req, func(page *armnetwork.LoadBalancersClientListAllResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAllPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + l.newListAllPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAllPager) { + l.newListAllPager.remove(req) + } + return resp, nil +} + +func (l *LoadBalancersServerTransport) dispatchBeginListInboundNatRulePortMappings(req *http.Request) (*http.Response, error) { + if l.srv.BeginListInboundNatRulePortMappings == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginListInboundNatRulePortMappings not implemented")} + } + beginListInboundNatRulePortMappings := l.beginListInboundNatRulePortMappings.get(req) + if beginListInboundNatRulePortMappings == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/backendAddressPools/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/queryInboundNatRulePortMapping` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.QueryInboundNatRulePortMappingRequest](req) + if err != nil { + return nil, err + } + groupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("groupName")]) + if err != nil { + return nil, err + } + loadBalancerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("loadBalancerName")]) + if err != nil { + return nil, err + } + backendPoolNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("backendPoolName")]) + if err != nil { + return nil, err + } + respr, errRespr := l.srv.BeginListInboundNatRulePortMappings(req.Context(), groupNameParam, loadBalancerNameParam, backendPoolNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginListInboundNatRulePortMappings = &respr + l.beginListInboundNatRulePortMappings.add(req, beginListInboundNatRulePortMappings) + } + + resp, err := server.PollerResponderNext(beginListInboundNatRulePortMappings, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + l.beginListInboundNatRulePortMappings.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginListInboundNatRulePortMappings) { + l.beginListInboundNatRulePortMappings.remove(req) + } + + return resp, nil +} + +func (l *LoadBalancersServerTransport) dispatchMigrateToIPBased(req *http.Request) (*http.Response, error) { + if l.srv.MigrateToIPBased == nil { + return nil, &nonRetriableError{errors.New("fake for method MigrateToIPBased not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/migrateToIpBased` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.MigrateLoadBalancerToIPBasedRequest](req) + if err != nil { + return nil, err + } + groupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("groupName")]) + if err != nil { + return nil, err + } + loadBalancerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("loadBalancerName")]) + if err != nil { + return nil, err + } + var options *armnetwork.LoadBalancersClientMigrateToIPBasedOptions + if !reflect.ValueOf(body).IsZero() { + options = &armnetwork.LoadBalancersClientMigrateToIPBasedOptions{ + Parameters: &body, + } + } + respr, errRespr := l.srv.MigrateToIPBased(req.Context(), groupNameParam, loadBalancerNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).MigratedPools, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (l *LoadBalancersServerTransport) dispatchBeginSwapPublicIPAddresses(req *http.Request) (*http.Response, error) { + if l.srv.BeginSwapPublicIPAddresses == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginSwapPublicIPAddresses not implemented")} + } + beginSwapPublicIPAddresses := l.beginSwapPublicIPAddresses.get(req) + if beginSwapPublicIPAddresses == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/setLoadBalancerFrontendPublicIpAddresses` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.LoadBalancerVipSwapRequest](req) + if err != nil { + return nil, err + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + respr, errRespr := l.srv.BeginSwapPublicIPAddresses(req.Context(), locationParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginSwapPublicIPAddresses = &respr + l.beginSwapPublicIPAddresses.add(req, beginSwapPublicIPAddresses) + } + + resp, err := server.PollerResponderNext(beginSwapPublicIPAddresses, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + l.beginSwapPublicIPAddresses.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginSwapPublicIPAddresses) { + l.beginSwapPublicIPAddresses.remove(req) + } + + return resp, nil +} + +func (l *LoadBalancersServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if l.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/loadBalancers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + loadBalancerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("loadBalancerName")]) + if err != nil { + return nil, err + } + respr, errRespr := l.srv.UpdateTags(req.Context(), resourceGroupNameParam, loadBalancerNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).LoadBalancer, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/localnetworkgateways_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/localnetworkgateways_server.go new file mode 100644 index 00000000000..defa8574ea9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/localnetworkgateways_server.go @@ -0,0 +1,299 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// LocalNetworkGatewaysServer is a fake server for instances of the armnetwork.LocalNetworkGatewaysClient type. +type LocalNetworkGatewaysServer struct { + // BeginCreateOrUpdate is the fake for method LocalNetworkGatewaysClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, localNetworkGatewayName string, parameters armnetwork.LocalNetworkGateway, options *armnetwork.LocalNetworkGatewaysClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.LocalNetworkGatewaysClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method LocalNetworkGatewaysClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, localNetworkGatewayName string, options *armnetwork.LocalNetworkGatewaysClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.LocalNetworkGatewaysClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method LocalNetworkGatewaysClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, localNetworkGatewayName string, options *armnetwork.LocalNetworkGatewaysClientGetOptions) (resp azfake.Responder[armnetwork.LocalNetworkGatewaysClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method LocalNetworkGatewaysClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armnetwork.LocalNetworkGatewaysClientListOptions) (resp azfake.PagerResponder[armnetwork.LocalNetworkGatewaysClientListResponse]) + + // UpdateTags is the fake for method LocalNetworkGatewaysClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, localNetworkGatewayName string, parameters 
armnetwork.TagsObject, options *armnetwork.LocalNetworkGatewaysClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.LocalNetworkGatewaysClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewLocalNetworkGatewaysServerTransport creates a new instance of LocalNetworkGatewaysServerTransport with the provided implementation. +// The returned LocalNetworkGatewaysServerTransport instance is connected to an instance of armnetwork.LocalNetworkGatewaysClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewLocalNetworkGatewaysServerTransport(srv *LocalNetworkGatewaysServer) *LocalNetworkGatewaysServerTransport { + return &LocalNetworkGatewaysServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.LocalNetworkGatewaysClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.LocalNetworkGatewaysClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.LocalNetworkGatewaysClientListResponse]](), + } +} + +// LocalNetworkGatewaysServerTransport connects instances of armnetwork.LocalNetworkGatewaysClient to instances of LocalNetworkGatewaysServer. +// Don't use this type directly, use NewLocalNetworkGatewaysServerTransport instead. +type LocalNetworkGatewaysServerTransport struct { + srv *LocalNetworkGatewaysServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.LocalNetworkGatewaysClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.LocalNetworkGatewaysClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.LocalNetworkGatewaysClientListResponse]] +} + +// Do implements the policy.Transporter interface for LocalNetworkGatewaysServerTransport. 
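+// Editorial sketch (not part of the generated code): Do routes each request to a dispatch helper by
+// the API name stored in the request context, and any method left nil returns a non-retriable error,
+// so a test only populates the fakes it exercises. The SetResponseError signature is assumed from
+// azcore/fake and the error code is a hypothetical value:
+//
+//	srv := fake.LocalNetworkGatewaysServer{
+//		Get: func(ctx context.Context, resourceGroupName, localNetworkGatewayName string, options *armnetwork.LocalNetworkGatewaysClientGetOptions) (resp azfake.Responder[armnetwork.LocalNetworkGatewaysClientGetResponse], errResp azfake.ErrorResponder) {
+//			// simulate an ARM error response instead of a success payload
+//			errResp.SetResponseError(http.StatusNotFound, "ResourceNotFound")
+//			return
+//		},
+//	}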
+func (l *LocalNetworkGatewaysServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "LocalNetworkGatewaysClient.BeginCreateOrUpdate": + resp, err = l.dispatchBeginCreateOrUpdate(req) + case "LocalNetworkGatewaysClient.BeginDelete": + resp, err = l.dispatchBeginDelete(req) + case "LocalNetworkGatewaysClient.Get": + resp, err = l.dispatchGet(req) + case "LocalNetworkGatewaysClient.NewListPager": + resp, err = l.dispatchNewListPager(req) + case "LocalNetworkGatewaysClient.UpdateTags": + resp, err = l.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (l *LocalNetworkGatewaysServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if l.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := l.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/localNetworkGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.LocalNetworkGateway](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + localNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("localNetworkGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := l.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, localNetworkGatewayNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + l.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + l.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + l.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (l *LocalNetworkGatewaysServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if l.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := l.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/localNetworkGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + localNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("localNetworkGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := l.srv.BeginDelete(req.Context(), resourceGroupNameParam, localNetworkGatewayNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + l.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + l.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + l.beginDelete.remove(req) + } + + return resp, nil +} + +func (l *LocalNetworkGatewaysServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if l.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/localNetworkGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + localNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("localNetworkGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := l.srv.Get(req.Context(), resourceGroupNameParam, localNetworkGatewayNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).LocalNetworkGateway, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (l *LocalNetworkGatewaysServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if l.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := l.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/localNetworkGateways` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := l.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + l.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.LocalNetworkGatewaysClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + l.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + l.newListPager.remove(req) + } + return resp, nil +} + +func (l *LocalNetworkGatewaysServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if l.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/localNetworkGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + localNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("localNetworkGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := l.srv.UpdateTags(req.Context(), resourceGroupNameParam, localNetworkGatewayNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).LocalNetworkGateway, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/management_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/management_server.go new file mode 100644 index 00000000000..20b3c3eda68 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/management_server.go @@ -0,0 +1,761 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" + "strconv" +) + +// ManagementServer is a fake server for instances of the armnetwork.ManagementClient type. +type ManagementServer struct { + // CheckDNSNameAvailability is the fake for method ManagementClient.CheckDNSNameAvailability + // HTTP status codes to indicate success: http.StatusOK + CheckDNSNameAvailability func(ctx context.Context, location string, domainNameLabel string, options *armnetwork.ManagementClientCheckDNSNameAvailabilityOptions) (resp azfake.Responder[armnetwork.ManagementClientCheckDNSNameAvailabilityResponse], errResp azfake.ErrorResponder) + + // BeginDeleteBastionShareableLink is the fake for method ManagementClient.BeginDeleteBastionShareableLink + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginDeleteBastionShareableLink func(ctx context.Context, resourceGroupName string, bastionHostName string, bslRequest armnetwork.BastionShareableLinkListRequest, options *armnetwork.ManagementClientBeginDeleteBastionShareableLinkOptions) (resp azfake.PollerResponder[armnetwork.ManagementClientDeleteBastionShareableLinkResponse], errResp azfake.ErrorResponder) + + // NewDisconnectActiveSessionsPager is the fake for method ManagementClient.NewDisconnectActiveSessionsPager + // HTTP status codes to indicate success: http.StatusOK + NewDisconnectActiveSessionsPager func(resourceGroupName string, bastionHostName string, sessionIDs armnetwork.SessionIDs, options *armnetwork.ManagementClientDisconnectActiveSessionsOptions) (resp azfake.PagerResponder[armnetwork.ManagementClientDisconnectActiveSessionsResponse]) + + // ExpressRouteProviderPort is the fake for method ManagementClient.ExpressRouteProviderPort + // HTTP status codes to indicate success: http.StatusOK + ExpressRouteProviderPort func(ctx context.Context, providerport string, options *armnetwork.ManagementClientExpressRouteProviderPortOptions) (resp azfake.Responder[armnetwork.ManagementClientExpressRouteProviderPortResponse], errResp azfake.ErrorResponder) + + // BeginGeneratevirtualwanvpnserverconfigurationvpnprofile is the fake for method 
ManagementClient.BeginGeneratevirtualwanvpnserverconfigurationvpnprofile + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGeneratevirtualwanvpnserverconfigurationvpnprofile func(ctx context.Context, resourceGroupName string, virtualWANName string, vpnClientParams armnetwork.VirtualWanVPNProfileParameters, options *armnetwork.ManagementClientBeginGeneratevirtualwanvpnserverconfigurationvpnprofileOptions) (resp azfake.PollerResponder[armnetwork.ManagementClientGeneratevirtualwanvpnserverconfigurationvpnprofileResponse], errResp azfake.ErrorResponder) + + // BeginGetActiveSessions is the fake for method ManagementClient.BeginGetActiveSessions + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGetActiveSessions func(ctx context.Context, resourceGroupName string, bastionHostName string, options *armnetwork.ManagementClientBeginGetActiveSessionsOptions) (resp azfake.PollerResponder[azfake.PagerResponder[armnetwork.ManagementClientGetActiveSessionsResponse]], errResp azfake.ErrorResponder) + + // NewGetBastionShareableLinkPager is the fake for method ManagementClient.NewGetBastionShareableLinkPager + // HTTP status codes to indicate success: http.StatusOK + NewGetBastionShareableLinkPager func(resourceGroupName string, bastionHostName string, bslRequest armnetwork.BastionShareableLinkListRequest, options *armnetwork.ManagementClientGetBastionShareableLinkOptions) (resp azfake.PagerResponder[armnetwork.ManagementClientGetBastionShareableLinkResponse]) + + // ListActiveConnectivityConfigurations is the fake for method ManagementClient.ListActiveConnectivityConfigurations + // HTTP status codes to indicate success: http.StatusOK + ListActiveConnectivityConfigurations func(ctx context.Context, resourceGroupName string, networkManagerName string, parameters armnetwork.ActiveConfigurationParameter, options *armnetwork.ManagementClientListActiveConnectivityConfigurationsOptions) (resp azfake.Responder[armnetwork.ManagementClientListActiveConnectivityConfigurationsResponse], errResp azfake.ErrorResponder) + + // ListActiveSecurityAdminRules is the fake for method ManagementClient.ListActiveSecurityAdminRules + // HTTP status codes to indicate success: http.StatusOK + ListActiveSecurityAdminRules func(ctx context.Context, resourceGroupName string, networkManagerName string, parameters armnetwork.ActiveConfigurationParameter, options *armnetwork.ManagementClientListActiveSecurityAdminRulesOptions) (resp azfake.Responder[armnetwork.ManagementClientListActiveSecurityAdminRulesResponse], errResp azfake.ErrorResponder) + + // ListNetworkManagerEffectiveConnectivityConfigurations is the fake for method ManagementClient.ListNetworkManagerEffectiveConnectivityConfigurations + // HTTP status codes to indicate success: http.StatusOK + ListNetworkManagerEffectiveConnectivityConfigurations func(ctx context.Context, resourceGroupName string, virtualNetworkName string, parameters armnetwork.QueryRequestOptions, options *armnetwork.ManagementClientListNetworkManagerEffectiveConnectivityConfigurationsOptions) (resp azfake.Responder[armnetwork.ManagementClientListNetworkManagerEffectiveConnectivityConfigurationsResponse], errResp azfake.ErrorResponder) + + // ListNetworkManagerEffectiveSecurityAdminRules is the fake for method ManagementClient.ListNetworkManagerEffectiveSecurityAdminRules + // HTTP status codes to indicate success: http.StatusOK + ListNetworkManagerEffectiveSecurityAdminRules func(ctx context.Context, resourceGroupName string, 
virtualNetworkName string, parameters armnetwork.QueryRequestOptions, options *armnetwork.ManagementClientListNetworkManagerEffectiveSecurityAdminRulesOptions) (resp azfake.Responder[armnetwork.ManagementClientListNetworkManagerEffectiveSecurityAdminRulesResponse], errResp azfake.ErrorResponder) + + // BeginPutBastionShareableLink is the fake for method ManagementClient.BeginPutBastionShareableLink + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginPutBastionShareableLink func(ctx context.Context, resourceGroupName string, bastionHostName string, bslRequest armnetwork.BastionShareableLinkListRequest, options *armnetwork.ManagementClientBeginPutBastionShareableLinkOptions) (resp azfake.PollerResponder[azfake.PagerResponder[armnetwork.ManagementClientPutBastionShareableLinkResponse]], errResp azfake.ErrorResponder) + + // SupportedSecurityProviders is the fake for method ManagementClient.SupportedSecurityProviders + // HTTP status codes to indicate success: http.StatusOK + SupportedSecurityProviders func(ctx context.Context, resourceGroupName string, virtualWANName string, options *armnetwork.ManagementClientSupportedSecurityProvidersOptions) (resp azfake.Responder[armnetwork.ManagementClientSupportedSecurityProvidersResponse], errResp azfake.ErrorResponder) +} + +// NewManagementServerTransport creates a new instance of ManagementServerTransport with the provided implementation. +// The returned ManagementServerTransport instance is connected to an instance of armnetwork.ManagementClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewManagementServerTransport(srv *ManagementServer) *ManagementServerTransport { + return &ManagementServerTransport{ + srv: srv, + beginDeleteBastionShareableLink: newTracker[azfake.PollerResponder[armnetwork.ManagementClientDeleteBastionShareableLinkResponse]](), + newDisconnectActiveSessionsPager: newTracker[azfake.PagerResponder[armnetwork.ManagementClientDisconnectActiveSessionsResponse]](), + beginGeneratevirtualwanvpnserverconfigurationvpnprofile: newTracker[azfake.PollerResponder[armnetwork.ManagementClientGeneratevirtualwanvpnserverconfigurationvpnprofileResponse]](), + beginGetActiveSessions: newTracker[azfake.PollerResponder[azfake.PagerResponder[armnetwork.ManagementClientGetActiveSessionsResponse]]](), + newGetBastionShareableLinkPager: newTracker[azfake.PagerResponder[armnetwork.ManagementClientGetBastionShareableLinkResponse]](), + beginPutBastionShareableLink: newTracker[azfake.PollerResponder[azfake.PagerResponder[armnetwork.ManagementClientPutBastionShareableLinkResponse]]](), + } +} + +// ManagementServerTransport connects instances of armnetwork.ManagementClient to instances of ManagementServer. +// Don't use this type directly, use NewManagementServerTransport instead. 
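+// Editorial sketch (not part of the generated code): the transport is typically handed to the module's
+// client factory so every ManagementClient call in a test is served by the fake. Names below are
+// placeholders and the factory/credential helpers are assumed from armnetwork and azcore/fake:
+//
+//	factory, err := armnetwork.NewClientFactory("subscriptionID", &azfake.TokenCredential{}, &arm.ClientOptions{
+//		ClientOptions: azcore.ClientOptions{Transport: fake.NewManagementServerTransport(&fake.ManagementServer{
+//			// populate only the methods the test exercises, e.g. CheckDNSNameAvailability
+//		})},
+//	})
+//	client := factory.NewManagementClient()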
+type ManagementServerTransport struct { + srv *ManagementServer + beginDeleteBastionShareableLink *tracker[azfake.PollerResponder[armnetwork.ManagementClientDeleteBastionShareableLinkResponse]] + newDisconnectActiveSessionsPager *tracker[azfake.PagerResponder[armnetwork.ManagementClientDisconnectActiveSessionsResponse]] + beginGeneratevirtualwanvpnserverconfigurationvpnprofile *tracker[azfake.PollerResponder[armnetwork.ManagementClientGeneratevirtualwanvpnserverconfigurationvpnprofileResponse]] + beginGetActiveSessions *tracker[azfake.PollerResponder[azfake.PagerResponder[armnetwork.ManagementClientGetActiveSessionsResponse]]] + newGetBastionShareableLinkPager *tracker[azfake.PagerResponder[armnetwork.ManagementClientGetBastionShareableLinkResponse]] + beginPutBastionShareableLink *tracker[azfake.PollerResponder[azfake.PagerResponder[armnetwork.ManagementClientPutBastionShareableLinkResponse]]] +} + +// Do implements the policy.Transporter interface for ManagementServerTransport. +func (m *ManagementServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ManagementClient.CheckDNSNameAvailability": + resp, err = m.dispatchCheckDNSNameAvailability(req) + case "ManagementClient.BeginDeleteBastionShareableLink": + resp, err = m.dispatchBeginDeleteBastionShareableLink(req) + case "ManagementClient.NewDisconnectActiveSessionsPager": + resp, err = m.dispatchNewDisconnectActiveSessionsPager(req) + case "ManagementClient.ExpressRouteProviderPort": + resp, err = m.dispatchExpressRouteProviderPort(req) + case "ManagementClient.BeginGeneratevirtualwanvpnserverconfigurationvpnprofile": + resp, err = m.dispatchBeginGeneratevirtualwanvpnserverconfigurationvpnprofile(req) + case "ManagementClient.BeginGetActiveSessions": + resp, err = m.dispatchBeginGetActiveSessions(req) + case "ManagementClient.NewGetBastionShareableLinkPager": + resp, err = m.dispatchNewGetBastionShareableLinkPager(req) + case "ManagementClient.ListActiveConnectivityConfigurations": + resp, err = m.dispatchListActiveConnectivityConfigurations(req) + case "ManagementClient.ListActiveSecurityAdminRules": + resp, err = m.dispatchListActiveSecurityAdminRules(req) + case "ManagementClient.ListNetworkManagerEffectiveConnectivityConfigurations": + resp, err = m.dispatchListNetworkManagerEffectiveConnectivityConfigurations(req) + case "ManagementClient.ListNetworkManagerEffectiveSecurityAdminRules": + resp, err = m.dispatchListNetworkManagerEffectiveSecurityAdminRules(req) + case "ManagementClient.BeginPutBastionShareableLink": + resp, err = m.dispatchBeginPutBastionShareableLink(req) + case "ManagementClient.SupportedSecurityProviders": + resp, err = m.dispatchSupportedSecurityProviders(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (m *ManagementServerTransport) dispatchCheckDNSNameAvailability(req *http.Request) (*http.Response, error) { + if m.srv.CheckDNSNameAvailability == nil { + return nil, &nonRetriableError{errors.New("fake for method CheckDNSNameAvailability not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/CheckDnsNameAvailability` + 
regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + domainNameLabelParam, err := url.QueryUnescape(qp.Get("domainNameLabel")) + if err != nil { + return nil, err + } + respr, errRespr := m.srv.CheckDNSNameAvailability(req.Context(), locationParam, domainNameLabelParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).DNSNameAvailabilityResult, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (m *ManagementServerTransport) dispatchBeginDeleteBastionShareableLink(req *http.Request) (*http.Response, error) { + if m.srv.BeginDeleteBastionShareableLink == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDeleteBastionShareableLink not implemented")} + } + beginDeleteBastionShareableLink := m.beginDeleteBastionShareableLink.get(req) + if beginDeleteBastionShareableLink == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/bastionHosts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/deleteShareableLinks` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.BastionShareableLinkListRequest](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + bastionHostNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("bastionHostName")]) + if err != nil { + return nil, err + } + respr, errRespr := m.srv.BeginDeleteBastionShareableLink(req.Context(), resourceGroupNameParam, bastionHostNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDeleteBastionShareableLink = &respr + m.beginDeleteBastionShareableLink.add(req, beginDeleteBastionShareableLink) + } + + resp, err := server.PollerResponderNext(beginDeleteBastionShareableLink, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + m.beginDeleteBastionShareableLink.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDeleteBastionShareableLink) { + m.beginDeleteBastionShareableLink.remove(req) + } + + return resp, nil +} + +func (m *ManagementServerTransport) dispatchNewDisconnectActiveSessionsPager(req *http.Request) (*http.Response, error) { + if m.srv.NewDisconnectActiveSessionsPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewDisconnectActiveSessionsPager not implemented")} + } + newDisconnectActiveSessionsPager := m.newDisconnectActiveSessionsPager.get(req) + if newDisconnectActiveSessionsPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/bastionHosts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/disconnectActiveSessions` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.SessionIDs](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + bastionHostNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("bastionHostName")]) + if err != nil { + return nil, err + } + resp := m.srv.NewDisconnectActiveSessionsPager(resourceGroupNameParam, bastionHostNameParam, body, nil) + newDisconnectActiveSessionsPager = &resp + m.newDisconnectActiveSessionsPager.add(req, newDisconnectActiveSessionsPager) + server.PagerResponderInjectNextLinks(newDisconnectActiveSessionsPager, req, func(page *armnetwork.ManagementClientDisconnectActiveSessionsResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newDisconnectActiveSessionsPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + m.newDisconnectActiveSessionsPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newDisconnectActiveSessionsPager) { + m.newDisconnectActiveSessionsPager.remove(req) + } + return resp, nil +} + +func (m *ManagementServerTransport) dispatchExpressRouteProviderPort(req *http.Request) (*http.Response, error) { + if m.srv.ExpressRouteProviderPort == nil { + return nil, &nonRetriableError{errors.New("fake for method ExpressRouteProviderPort not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteProviderPorts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + providerportParam, err := url.PathUnescape(matches[regex.SubexpIndex("providerport")]) + if err != nil { + return nil, err + } + respr, errRespr := m.srv.ExpressRouteProviderPort(req.Context(), providerportParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ExpressRouteProviderPort, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (m *ManagementServerTransport) dispatchBeginGeneratevirtualwanvpnserverconfigurationvpnprofile(req *http.Request) (*http.Response, error) { + if m.srv.BeginGeneratevirtualwanvpnserverconfigurationvpnprofile == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGeneratevirtualwanvpnserverconfigurationvpnprofile not implemented")} + } + beginGeneratevirtualwanvpnserverconfigurationvpnprofile := m.beginGeneratevirtualwanvpnserverconfigurationvpnprofile.get(req) + if beginGeneratevirtualwanvpnserverconfigurationvpnprofile == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualWans/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/GenerateVpnProfile` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VirtualWanVPNProfileParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualWANNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualWANName")]) + if err != nil { + return nil, err + } + respr, errRespr := m.srv.BeginGeneratevirtualwanvpnserverconfigurationvpnprofile(req.Context(), resourceGroupNameParam, virtualWANNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGeneratevirtualwanvpnserverconfigurationvpnprofile = &respr + m.beginGeneratevirtualwanvpnserverconfigurationvpnprofile.add(req, beginGeneratevirtualwanvpnserverconfigurationvpnprofile) + } + + resp, err := server.PollerResponderNext(beginGeneratevirtualwanvpnserverconfigurationvpnprofile, req) + if err != nil { + return 
nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + m.beginGeneratevirtualwanvpnserverconfigurationvpnprofile.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGeneratevirtualwanvpnserverconfigurationvpnprofile) { + m.beginGeneratevirtualwanvpnserverconfigurationvpnprofile.remove(req) + } + + return resp, nil +} + +func (m *ManagementServerTransport) dispatchBeginGetActiveSessions(req *http.Request) (*http.Response, error) { + if m.srv.BeginGetActiveSessions == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGetActiveSessions not implemented")} + } + beginGetActiveSessions := m.beginGetActiveSessions.get(req) + if beginGetActiveSessions == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/bastionHosts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/getActiveSessions` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + bastionHostNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("bastionHostName")]) + if err != nil { + return nil, err + } + respr, errRespr := m.srv.BeginGetActiveSessions(req.Context(), resourceGroupNameParam, bastionHostNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGetActiveSessions = &respr + m.beginGetActiveSessions.add(req, beginGetActiveSessions) + } + + resp, err := server.PollerResponderNext(beginGetActiveSessions, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + m.beginGetActiveSessions.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGetActiveSessions) { + m.beginGetActiveSessions.remove(req) + } + + return resp, nil +} + +func (m *ManagementServerTransport) dispatchNewGetBastionShareableLinkPager(req *http.Request) (*http.Response, error) { + if m.srv.NewGetBastionShareableLinkPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewGetBastionShareableLinkPager not implemented")} + } + newGetBastionShareableLinkPager := m.newGetBastionShareableLinkPager.get(req) + if newGetBastionShareableLinkPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/bastionHosts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/getShareableLinks` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.BastionShareableLinkListRequest](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + bastionHostNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("bastionHostName")]) + if err != nil { + return nil, err + } + resp := m.srv.NewGetBastionShareableLinkPager(resourceGroupNameParam, bastionHostNameParam, body, nil) + newGetBastionShareableLinkPager = &resp + m.newGetBastionShareableLinkPager.add(req, newGetBastionShareableLinkPager) + server.PagerResponderInjectNextLinks(newGetBastionShareableLinkPager, req, func(page *armnetwork.ManagementClientGetBastionShareableLinkResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newGetBastionShareableLinkPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + m.newGetBastionShareableLinkPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newGetBastionShareableLinkPager) { + m.newGetBastionShareableLinkPager.remove(req) + } + return resp, nil +} + +func (m *ManagementServerTransport) dispatchListActiveConnectivityConfigurations(req *http.Request) (*http.Response, error) { + if m.srv.ListActiveConnectivityConfigurations == nil { + return nil, &nonRetriableError{errors.New("fake for method ListActiveConnectivityConfigurations not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/listActiveConnectivityConfigurations` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + body, err := server.UnmarshalRequestAsJSON[armnetwork.ActiveConfigurationParameter](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + topUnescaped, err := url.QueryUnescape(qp.Get("$top")) + if err != nil { + return nil, err + } + topParam, err := parseOptional(topUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + var options *armnetwork.ManagementClientListActiveConnectivityConfigurationsOptions + if topParam != nil { + options = &armnetwork.ManagementClientListActiveConnectivityConfigurationsOptions{ + Top: topParam, + } + } + respr, errRespr := m.srv.ListActiveConnectivityConfigurations(req.Context(), resourceGroupNameParam, networkManagerNameParam, body, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ActiveConnectivityConfigurationsListResult, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (m *ManagementServerTransport) dispatchListActiveSecurityAdminRules(req *http.Request) (*http.Response, error) { + if m.srv.ListActiveSecurityAdminRules == nil { + return nil, &nonRetriableError{errors.New("fake for method ListActiveSecurityAdminRules not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/listActiveSecurityAdminRules` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + body, err := server.UnmarshalRequestAsJSON[armnetwork.ActiveConfigurationParameter](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + topUnescaped, err := url.QueryUnescape(qp.Get("$top")) + if err != nil { + return nil, err + } + topParam, err := parseOptional(topUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + var options *armnetwork.ManagementClientListActiveSecurityAdminRulesOptions + if topParam != nil { + options = &armnetwork.ManagementClientListActiveSecurityAdminRulesOptions{ + Top: topParam, + } + } + respr, errRespr := m.srv.ListActiveSecurityAdminRules(req.Context(), resourceGroupNameParam, networkManagerNameParam, body, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ActiveSecurityAdminRulesListResult, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (m *ManagementServerTransport) dispatchListNetworkManagerEffectiveConnectivityConfigurations(req *http.Request) (*http.Response, error) { + if m.srv.ListNetworkManagerEffectiveConnectivityConfigurations == nil { + return nil, &nonRetriableError{errors.New("fake for method ListNetworkManagerEffectiveConnectivityConfigurations not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/listNetworkManagerEffectiveConnectivityConfigurations` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + body, err := server.UnmarshalRequestAsJSON[armnetwork.QueryRequestOptions](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkName")]) + if err != nil { + return nil, err + } + topUnescaped, err := url.QueryUnescape(qp.Get("$top")) + if err != nil { + return nil, err + } + topParam, err := parseOptional(topUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + var options *armnetwork.ManagementClientListNetworkManagerEffectiveConnectivityConfigurationsOptions + if topParam != nil { + options = &armnetwork.ManagementClientListNetworkManagerEffectiveConnectivityConfigurationsOptions{ + Top: topParam, + } + } + respr, errRespr := m.srv.ListNetworkManagerEffectiveConnectivityConfigurations(req.Context(), resourceGroupNameParam, virtualNetworkNameParam, body, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ManagerEffectiveConnectivityConfigurationListResult, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (m *ManagementServerTransport) dispatchListNetworkManagerEffectiveSecurityAdminRules(req *http.Request) (*http.Response, error) { + if m.srv.ListNetworkManagerEffectiveSecurityAdminRules == nil { + return nil, &nonRetriableError{errors.New("fake for method ListNetworkManagerEffectiveSecurityAdminRules not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/listNetworkManagerEffectiveSecurityAdminRules` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + body, err := server.UnmarshalRequestAsJSON[armnetwork.QueryRequestOptions](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkName")]) + if err != nil { + return nil, err + } + topUnescaped, err := url.QueryUnescape(qp.Get("$top")) + if err != nil { + return nil, err + } + topParam, err := parseOptional(topUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + var options *armnetwork.ManagementClientListNetworkManagerEffectiveSecurityAdminRulesOptions + if topParam != nil { + options = &armnetwork.ManagementClientListNetworkManagerEffectiveSecurityAdminRulesOptions{ + Top: topParam, + } + } + respr, errRespr := m.srv.ListNetworkManagerEffectiveSecurityAdminRules(req.Context(), resourceGroupNameParam, virtualNetworkNameParam, body, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ManagerEffectiveSecurityAdminRulesListResult, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (m *ManagementServerTransport) dispatchBeginPutBastionShareableLink(req *http.Request) (*http.Response, error) { + if m.srv.BeginPutBastionShareableLink == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginPutBastionShareableLink not implemented")} + } + beginPutBastionShareableLink := m.beginPutBastionShareableLink.get(req) + if beginPutBastionShareableLink == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/bastionHosts/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/createShareableLinks` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.BastionShareableLinkListRequest](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + bastionHostNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("bastionHostName")]) + if err != nil { + return nil, err + } + respr, errRespr := m.srv.BeginPutBastionShareableLink(req.Context(), resourceGroupNameParam, bastionHostNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginPutBastionShareableLink = &respr + m.beginPutBastionShareableLink.add(req, beginPutBastionShareableLink) + } + + resp, err := server.PollerResponderNext(beginPutBastionShareableLink, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + m.beginPutBastionShareableLink.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginPutBastionShareableLink) { + m.beginPutBastionShareableLink.remove(req) + } + + return resp, nil +} + +func (m *ManagementServerTransport) dispatchSupportedSecurityProviders(req *http.Request) (*http.Response, error) { + if m.srv.SupportedSecurityProviders == nil { + return nil, &nonRetriableError{errors.New("fake for method SupportedSecurityProviders not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualWans/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/supportedSecurityProviders` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualWANNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualWANName")]) + if err != nil { + return nil, err + } + respr, errRespr := m.srv.SupportedSecurityProviders(req.Context(), resourceGroupNameParam, virtualWANNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualWanSecurityProviders, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/managementgroupnetworkmanagerconnections_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/managementgroupnetworkmanagerconnections_server.go new file mode 100644 index 00000000000..1ce7b2c1345 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/managementgroupnetworkmanagerconnections_server.go @@ -0,0 +1,258 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" + "strconv" +) + +// ManagementGroupNetworkManagerConnectionsServer is a fake server for instances of the armnetwork.ManagementGroupNetworkManagerConnectionsClient type. 
+type ManagementGroupNetworkManagerConnectionsServer struct { + // CreateOrUpdate is the fake for method ManagementGroupNetworkManagerConnectionsClient.CreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + CreateOrUpdate func(ctx context.Context, managementGroupID string, networkManagerConnectionName string, parameters armnetwork.ManagerConnection, options *armnetwork.ManagementGroupNetworkManagerConnectionsClientCreateOrUpdateOptions) (resp azfake.Responder[armnetwork.ManagementGroupNetworkManagerConnectionsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // Delete is the fake for method ManagementGroupNetworkManagerConnectionsClient.Delete + // HTTP status codes to indicate success: http.StatusOK, http.StatusNoContent + Delete func(ctx context.Context, managementGroupID string, networkManagerConnectionName string, options *armnetwork.ManagementGroupNetworkManagerConnectionsClientDeleteOptions) (resp azfake.Responder[armnetwork.ManagementGroupNetworkManagerConnectionsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method ManagementGroupNetworkManagerConnectionsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, managementGroupID string, networkManagerConnectionName string, options *armnetwork.ManagementGroupNetworkManagerConnectionsClientGetOptions) (resp azfake.Responder[armnetwork.ManagementGroupNetworkManagerConnectionsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method ManagementGroupNetworkManagerConnectionsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(managementGroupID string, options *armnetwork.ManagementGroupNetworkManagerConnectionsClientListOptions) (resp azfake.PagerResponder[armnetwork.ManagementGroupNetworkManagerConnectionsClientListResponse]) +} + +// NewManagementGroupNetworkManagerConnectionsServerTransport creates a new instance of ManagementGroupNetworkManagerConnectionsServerTransport with the provided implementation. +// The returned ManagementGroupNetworkManagerConnectionsServerTransport instance is connected to an instance of armnetwork.ManagementGroupNetworkManagerConnectionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewManagementGroupNetworkManagerConnectionsServerTransport(srv *ManagementGroupNetworkManagerConnectionsServer) *ManagementGroupNetworkManagerConnectionsServerTransport { + return &ManagementGroupNetworkManagerConnectionsServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.ManagementGroupNetworkManagerConnectionsClientListResponse]](), + } +} + +// ManagementGroupNetworkManagerConnectionsServerTransport connects instances of armnetwork.ManagementGroupNetworkManagerConnectionsClient to instances of ManagementGroupNetworkManagerConnectionsServer. +// Don't use this type directly, use NewManagementGroupNetworkManagerConnectionsServerTransport instead. +type ManagementGroupNetworkManagerConnectionsServerTransport struct { + srv *ManagementGroupNetworkManagerConnectionsServer + newListPager *tracker[azfake.PagerResponder[armnetwork.ManagementGroupNetworkManagerConnectionsClientListResponse]] +} + +// Do implements the policy.Transporter interface for ManagementGroupNetworkManagerConnectionsServerTransport. 
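The azfake.Responder fields above follow the standard azcore fake pattern: a test assigns a handler that records a canned result with SetResponse (or an error through the paired ErrorResponder), and the transport replays it whenever the matching client method runs. A minimal sketch of a Get fake, assuming the azfake Responder API and placeholder payload fields; imports mirror those of the generated file, plus the fake package itself:

srv := fake.ManagementGroupNetworkManagerConnectionsServer{
	Get: func(ctx context.Context, managementGroupID string, networkManagerConnectionName string, options *armnetwork.ManagementGroupNetworkManagerConnectionsClientGetOptions) (resp azfake.Responder[armnetwork.ManagementGroupNetworkManagerConnectionsClientGetResponse], errResp azfake.ErrorResponder) {
		// Echo the requested connection name in a canned payload; the transport
		// marshals it as the JSON body of the Get response.
		resp.SetResponse(http.StatusOK, armnetwork.ManagementGroupNetworkManagerConnectionsClientGetResponse{
			ManagerConnection: armnetwork.ManagerConnection{Name: to.Ptr(networkManagerConnectionName)},
		}, nil)
		return
	},
}
// The transport built from srv is what gets wired into the client; see the
// ClientOptions.Transporter sketch further below.
_ = fake.NewManagementGroupNetworkManagerConnectionsServerTransport(&srv)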
+func (m *ManagementGroupNetworkManagerConnectionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ManagementGroupNetworkManagerConnectionsClient.CreateOrUpdate": + resp, err = m.dispatchCreateOrUpdate(req) + case "ManagementGroupNetworkManagerConnectionsClient.Delete": + resp, err = m.dispatchDelete(req) + case "ManagementGroupNetworkManagerConnectionsClient.Get": + resp, err = m.dispatchGet(req) + case "ManagementGroupNetworkManagerConnectionsClient.NewListPager": + resp, err = m.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (m *ManagementGroupNetworkManagerConnectionsServerTransport) dispatchCreateOrUpdate(req *http.Request) (*http.Response, error) { + if m.srv.CreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method CreateOrUpdate not implemented")} + } + const regexStr = `/providers/Microsoft\.Management/managementGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagerConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.ManagerConnection](req) + if err != nil { + return nil, err + } + managementGroupIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("managementGroupId")]) + if err != nil { + return nil, err + } + networkManagerConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := m.srv.CreateOrUpdate(req.Context(), managementGroupIDParam, networkManagerConnectionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusCreated}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ManagerConnection, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (m *ManagementGroupNetworkManagerConnectionsServerTransport) dispatchDelete(req *http.Request) (*http.Response, error) { + if m.srv.Delete == nil { + return nil, &nonRetriableError{errors.New("fake for method Delete not implemented")} + } + const regexStr = `/providers/Microsoft\.Management/managementGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagerConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + managementGroupIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("managementGroupId")]) + if err != nil { + return nil, err + } + networkManagerConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := m.srv.Delete(req.Context(), managementGroupIDParam, networkManagerConnectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusNoContent}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusNoContent", respContent.HTTPStatus)} + } + resp, err := server.NewResponse(respContent, req, nil) + if err != nil { + return nil, err + } + return resp, nil +} + +func (m *ManagementGroupNetworkManagerConnectionsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if m.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/providers/Microsoft\.Management/managementGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagerConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + managementGroupIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("managementGroupId")]) + if err != nil { + return nil, err + } + networkManagerConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := m.srv.Get(req.Context(), managementGroupIDParam, networkManagerConnectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ManagerConnection, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (m *ManagementGroupNetworkManagerConnectionsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if m.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := m.newListPager.get(req) + if newListPager == nil { + const regexStr = `/providers/Microsoft\.Management/managementGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagerConnections` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + managementGroupIDParam, err := url.PathUnescape(matches[regex.SubexpIndex("managementGroupId")]) + if err != nil { + return nil, err + } + topUnescaped, err := url.QueryUnescape(qp.Get("$top")) + if err != nil { + return nil, err + } + topParam, err := parseOptional(topUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + skipTokenUnescaped, err := url.QueryUnescape(qp.Get("$skipToken")) + if err != nil { + return nil, err + } + skipTokenParam := getOptional(skipTokenUnescaped) + var options *armnetwork.ManagementGroupNetworkManagerConnectionsClientListOptions + if topParam != nil || skipTokenParam != nil { + options = &armnetwork.ManagementGroupNetworkManagerConnectionsClientListOptions{ + Top: topParam, + SkipToken: skipTokenParam, + } + } + resp := m.srv.NewListPager(managementGroupIDParam, options) + newListPager = &resp + m.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.ManagementGroupNetworkManagerConnectionsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + m.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + m.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/managercommits_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/managercommits_server.go new file mode 100644 index 00000000000..0d2e55e3b27 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/managercommits_server.go @@ -0,0 +1,119 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ManagerCommitsServer is a fake server for instances of the armnetwork.ManagerCommitsClient type. +type ManagerCommitsServer struct { + // BeginPost is the fake for method ManagerCommitsClient.BeginPost + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginPost func(ctx context.Context, resourceGroupName string, networkManagerName string, parameters armnetwork.ManagerCommit, options *armnetwork.ManagerCommitsClientBeginPostOptions) (resp azfake.PollerResponder[armnetwork.ManagerCommitsClientPostResponse], errResp azfake.ErrorResponder) +} + +// NewManagerCommitsServerTransport creates a new instance of ManagerCommitsServerTransport with the provided implementation. +// The returned ManagerCommitsServerTransport instance is connected to an instance of armnetwork.ManagerCommitsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewManagerCommitsServerTransport(srv *ManagerCommitsServer) *ManagerCommitsServerTransport { + return &ManagerCommitsServerTransport{ + srv: srv, + beginPost: newTracker[azfake.PollerResponder[armnetwork.ManagerCommitsClientPostResponse]](), + } +} + +// ManagerCommitsServerTransport connects instances of armnetwork.ManagerCommitsClient to instances of ManagerCommitsServer. +// Don't use this type directly, use NewManagerCommitsServerTransport instead. +type ManagerCommitsServerTransport struct { + srv *ManagerCommitsServer + beginPost *tracker[azfake.PollerResponder[armnetwork.ManagerCommitsClientPostResponse]] +} + +// Do implements the policy.Transporter interface for ManagerCommitsServerTransport. 
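Long-running operations such as BeginPost hand back an azfake.PollerResponder instead of a plain Responder: the test queues zero or more non-terminal responses plus one terminal result, and the PollerResponderNext/PollerResponderMore calls in the dispatch code below replay them poll by poll. A sketch, assuming the PollerResponder API (AddNonTerminalResponse, SetTerminalResponse) and a zero-value terminal payload for brevity:

srv := fake.ManagerCommitsServer{
	BeginPost: func(ctx context.Context, resourceGroupName string, networkManagerName string, parameters armnetwork.ManagerCommit, options *armnetwork.ManagerCommitsClientBeginPostOptions) (resp azfake.PollerResponder[armnetwork.ManagerCommitsClientPostResponse], errResp azfake.ErrorResponder) {
		// First poll answers 202 Accepted, the next one completes the operation.
		resp.AddNonTerminalResponse(http.StatusAccepted, nil)
		resp.SetTerminalResponse(http.StatusOK, armnetwork.ManagerCommitsClientPostResponse{}, nil)
		return
	},
}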
+func (m *ManagerCommitsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ManagerCommitsClient.BeginPost": + resp, err = m.dispatchBeginPost(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (m *ManagerCommitsServerTransport) dispatchBeginPost(req *http.Request) (*http.Response, error) { + if m.srv.BeginPost == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginPost not implemented")} + } + beginPost := m.beginPost.get(req) + if beginPost == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/commit` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.ManagerCommit](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + respr, errRespr := m.srv.BeginPost(req.Context(), resourceGroupNameParam, networkManagerNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginPost = &respr + m.beginPost.add(req, beginPost) + } + + resp, err := server.PollerResponderNext(beginPost, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + m.beginPost.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginPost) { + m.beginPost.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/managerdeploymentstatus_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/managerdeploymentstatus_server.go new file mode 100644 index 00000000000..d672820d35b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/managerdeploymentstatus_server.go @@ -0,0 +1,126 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" + "strconv" +) + +// ManagerDeploymentStatusServer is a fake server for instances of the armnetwork.ManagerDeploymentStatusClient type. +type ManagerDeploymentStatusServer struct { + // List is the fake for method ManagerDeploymentStatusClient.List + // HTTP status codes to indicate success: http.StatusOK + List func(ctx context.Context, resourceGroupName string, networkManagerName string, parameters armnetwork.ManagerDeploymentStatusParameter, options *armnetwork.ManagerDeploymentStatusClientListOptions) (resp azfake.Responder[armnetwork.ManagerDeploymentStatusClientListResponse], errResp azfake.ErrorResponder) +} + +// NewManagerDeploymentStatusServerTransport creates a new instance of ManagerDeploymentStatusServerTransport with the provided implementation. +// The returned ManagerDeploymentStatusServerTransport instance is connected to an instance of armnetwork.ManagerDeploymentStatusClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewManagerDeploymentStatusServerTransport(srv *ManagerDeploymentStatusServer) *ManagerDeploymentStatusServerTransport { + return &ManagerDeploymentStatusServerTransport{srv: srv} +} + +// ManagerDeploymentStatusServerTransport connects instances of armnetwork.ManagerDeploymentStatusClient to instances of ManagerDeploymentStatusServer. +// Don't use this type directly, use NewManagerDeploymentStatusServerTransport instead. +type ManagerDeploymentStatusServerTransport struct { + srv *ManagerDeploymentStatusServer +} + +// Do implements the policy.Transporter interface for ManagerDeploymentStatusServerTransport. 
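The paired azfake.ErrorResponder is the hook for failure injection: rather than setting a response, the fake records an error, and server.GetError in the dispatch method below converts it into the HTTP error the client observes. A sketch for List, assuming ErrorResponder.SetResponseError takes a status code and an ARM error code; both values here are placeholders:

srv := fake.ManagerDeploymentStatusServer{
	List: func(ctx context.Context, resourceGroupName string, networkManagerName string, parameters armnetwork.ManagerDeploymentStatusParameter, options *armnetwork.ManagerDeploymentStatusClientListOptions) (resp azfake.Responder[armnetwork.ManagerDeploymentStatusClientListResponse], errResp azfake.ErrorResponder) {
		// Simulate a service-side failure; the caller receives an *azcore.ResponseError.
		errResp.SetResponseError(http.StatusNotFound, "ResourceNotFound")
		return
	},
}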
+func (m *ManagerDeploymentStatusServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ManagerDeploymentStatusClient.List": + resp, err = m.dispatchList(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (m *ManagerDeploymentStatusServerTransport) dispatchList(req *http.Request) (*http.Response, error) { + if m.srv.List == nil { + return nil, &nonRetriableError{errors.New("fake for method List not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/listDeploymentStatus` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + body, err := server.UnmarshalRequestAsJSON[armnetwork.ManagerDeploymentStatusParameter](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + topUnescaped, err := url.QueryUnescape(qp.Get("$top")) + if err != nil { + return nil, err + } + topParam, err := parseOptional(topUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + var options *armnetwork.ManagerDeploymentStatusClientListOptions + if topParam != nil { + options = &armnetwork.ManagerDeploymentStatusClientListOptions{ + Top: topParam, + } + } + respr, errRespr := m.srv.List(req.Context(), resourceGroupNameParam, networkManagerNameParam, body, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ManagerDeploymentStatusListResult, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/managers_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/managers_server.go new file mode 100644 index 00000000000..74afa7a6fa1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/managers_server.go @@ -0,0 +1,397 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. 
+// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" + "strconv" +) + +// ManagersServer is a fake server for instances of the armnetwork.ManagersClient type. +type ManagersServer struct { + // CreateOrUpdate is the fake for method ManagersClient.CreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + CreateOrUpdate func(ctx context.Context, resourceGroupName string, networkManagerName string, parameters armnetwork.Manager, options *armnetwork.ManagersClientCreateOrUpdateOptions) (resp azfake.Responder[armnetwork.ManagersClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method ManagersClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, networkManagerName string, options *armnetwork.ManagersClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.ManagersClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method ManagersClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, networkManagerName string, options *armnetwork.ManagersClientGetOptions) (resp azfake.Responder[armnetwork.ManagersClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method ManagersClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armnetwork.ManagersClientListOptions) (resp azfake.PagerResponder[armnetwork.ManagersClientListResponse]) + + // NewListBySubscriptionPager is the fake for method ManagersClient.NewListBySubscriptionPager + // HTTP status codes to indicate success: http.StatusOK + NewListBySubscriptionPager func(options *armnetwork.ManagersClientListBySubscriptionOptions) (resp azfake.PagerResponder[armnetwork.ManagersClientListBySubscriptionResponse]) + + // Patch is the fake for method ManagersClient.Patch + // HTTP status codes to indicate success: http.StatusOK + Patch func(ctx context.Context, resourceGroupName string, networkManagerName string, parameters armnetwork.PatchObject, options *armnetwork.ManagersClientPatchOptions) (resp azfake.Responder[armnetwork.ManagersClientPatchResponse], errResp azfake.ErrorResponder) +} + +// NewManagersServerTransport creates a new instance of ManagersServerTransport with the provided implementation. +// The returned ManagersServerTransport instance is connected to an instance of armnetwork.ManagersClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
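As the comment above notes, the generated transport is plugged into the real client through the azcore.ClientOptions.Transporter field, so requests never leave the process. A self-contained sketch of that wiring, assuming the usual armnetwork constructor signature (subscription ID, credential, options) and using placeholder IDs and payload fields:

package fake_test

import (
	"context"
	"net/http"
	"testing"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func TestManagersGetAgainstFake(t *testing.T) {
	srv := fake.ManagersServer{
		Get: func(ctx context.Context, resourceGroupName string, networkManagerName string, options *armnetwork.ManagersClientGetOptions) (resp azfake.Responder[armnetwork.ManagersClientGetResponse], errResp azfake.ErrorResponder) {
			// Return a canned Manager whose name echoes the request.
			resp.SetResponse(http.StatusOK, armnetwork.ManagersClientGetResponse{
				Manager: armnetwork.Manager{Name: to.Ptr(networkManagerName)},
			}, nil)
			return
		},
	}
	// Route the client's pipeline through the fake transport instead of the network.
	client, err := armnetwork.NewManagersClient("00000000-0000-0000-0000-000000000000", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transporter: fake.NewManagersServerTransport(&srv)},
	})
	if err != nil {
		t.Fatal(err)
	}
	got, err := client.Get(context.Background(), "rg", "mgr", nil)
	if err != nil {
		t.Fatal(err)
	}
	if got.Name == nil || *got.Name != "mgr" {
		t.Fatalf("unexpected manager name: %v", got.Name)
	}
}

The same pattern applies to every server type in this package: populate only the methods a test exercises, and any unimplemented method fails fast with the "not implemented" error returned by its dispatch function.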
+func NewManagersServerTransport(srv *ManagersServer) *ManagersServerTransport { + return &ManagersServerTransport{ + srv: srv, + beginDelete: newTracker[azfake.PollerResponder[armnetwork.ManagersClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.ManagersClientListResponse]](), + newListBySubscriptionPager: newTracker[azfake.PagerResponder[armnetwork.ManagersClientListBySubscriptionResponse]](), + } +} + +// ManagersServerTransport connects instances of armnetwork.ManagersClient to instances of ManagersServer. +// Don't use this type directly, use NewManagersServerTransport instead. +type ManagersServerTransport struct { + srv *ManagersServer + beginDelete *tracker[azfake.PollerResponder[armnetwork.ManagersClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.ManagersClientListResponse]] + newListBySubscriptionPager *tracker[azfake.PagerResponder[armnetwork.ManagersClientListBySubscriptionResponse]] +} + +// Do implements the policy.Transporter interface for ManagersServerTransport. +func (m *ManagersServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ManagersClient.CreateOrUpdate": + resp, err = m.dispatchCreateOrUpdate(req) + case "ManagersClient.BeginDelete": + resp, err = m.dispatchBeginDelete(req) + case "ManagersClient.Get": + resp, err = m.dispatchGet(req) + case "ManagersClient.NewListPager": + resp, err = m.dispatchNewListPager(req) + case "ManagersClient.NewListBySubscriptionPager": + resp, err = m.dispatchNewListBySubscriptionPager(req) + case "ManagersClient.Patch": + resp, err = m.dispatchPatch(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (m *ManagersServerTransport) dispatchCreateOrUpdate(req *http.Request) (*http.Response, error) { + if m.srv.CreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method CreateOrUpdate not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.Manager](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + respr, errRespr := m.srv.CreateOrUpdate(req.Context(), resourceGroupNameParam, networkManagerNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusCreated}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Manager, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (m *ManagersServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if m.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := m.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + forceUnescaped, err := url.QueryUnescape(qp.Get("force")) + if err != nil { + return nil, err + } + forceParam, err := parseOptional(forceUnescaped, strconv.ParseBool) + if err != nil { + return nil, err + } + var options *armnetwork.ManagersClientBeginDeleteOptions + if forceParam != nil { + options = &armnetwork.ManagersClientBeginDeleteOptions{ + Force: forceParam, + } + } + respr, errRespr := m.srv.BeginDelete(req.Context(), resourceGroupNameParam, networkManagerNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + m.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + m.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + m.beginDelete.remove(req) + } + + return resp, nil +} + +func (m *ManagersServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if m.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + respr, errRespr := m.srv.Get(req.Context(), resourceGroupNameParam, networkManagerNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Manager, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (m *ManagersServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if m.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := m.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + topUnescaped, err := url.QueryUnescape(qp.Get("$top")) + if err != nil { + return nil, err + } + topParam, err := parseOptional(topUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + skipTokenUnescaped, err := url.QueryUnescape(qp.Get("$skipToken")) + if err != nil { + return nil, err + } + skipTokenParam := getOptional(skipTokenUnescaped) + var options *armnetwork.ManagersClientListOptions + if topParam != nil || skipTokenParam != nil { + options = &armnetwork.ManagersClientListOptions{ + Top: topParam, + SkipToken: skipTokenParam, + } + } + resp := m.srv.NewListPager(resourceGroupNameParam, options) + newListPager = &resp + m.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.ManagersClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + 
}) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + m.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + m.newListPager.remove(req) + } + return resp, nil +} + +func (m *ManagersServerTransport) dispatchNewListBySubscriptionPager(req *http.Request) (*http.Response, error) { + if m.srv.NewListBySubscriptionPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListBySubscriptionPager not implemented")} + } + newListBySubscriptionPager := m.newListBySubscriptionPager.get(req) + if newListBySubscriptionPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + topUnescaped, err := url.QueryUnescape(qp.Get("$top")) + if err != nil { + return nil, err + } + topParam, err := parseOptional(topUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + skipTokenUnescaped, err := url.QueryUnescape(qp.Get("$skipToken")) + if err != nil { + return nil, err + } + skipTokenParam := getOptional(skipTokenUnescaped) + var options *armnetwork.ManagersClientListBySubscriptionOptions + if topParam != nil || skipTokenParam != nil { + options = &armnetwork.ManagersClientListBySubscriptionOptions{ + Top: topParam, + SkipToken: skipTokenParam, + } + } + resp := m.srv.NewListBySubscriptionPager(options) + newListBySubscriptionPager = &resp + m.newListBySubscriptionPager.add(req, newListBySubscriptionPager) + server.PagerResponderInjectNextLinks(newListBySubscriptionPager, req, func(page *armnetwork.ManagersClientListBySubscriptionResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListBySubscriptionPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + m.newListBySubscriptionPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListBySubscriptionPager) { + m.newListBySubscriptionPager.remove(req) + } + return resp, nil +} + +func (m *ManagersServerTransport) dispatchPatch(req *http.Request) (*http.Response, error) { + if m.srv.Patch == nil { + return nil, &nonRetriableError{errors.New("fake for method Patch not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.PatchObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + respr, errRespr := m.srv.Patch(req.Context(), resourceGroupNameParam, networkManagerNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Manager, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/natgateways_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/natgateways_server.go new file mode 100644 index 00000000000..fae876cba56 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/natgateways_server.go @@ -0,0 +1,352 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// NatGatewaysServer is a fake server for instances of the armnetwork.NatGatewaysClient type. 
+type NatGatewaysServer struct { + // BeginCreateOrUpdate is the fake for method NatGatewaysClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated, http.StatusAccepted + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, natGatewayName string, parameters armnetwork.NatGateway, options *armnetwork.NatGatewaysClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.NatGatewaysClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method NatGatewaysClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, natGatewayName string, options *armnetwork.NatGatewaysClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.NatGatewaysClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method NatGatewaysClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, natGatewayName string, options *armnetwork.NatGatewaysClientGetOptions) (resp azfake.Responder[armnetwork.NatGatewaysClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method NatGatewaysClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armnetwork.NatGatewaysClientListOptions) (resp azfake.PagerResponder[armnetwork.NatGatewaysClientListResponse]) + + // NewListAllPager is the fake for method NatGatewaysClient.NewListAllPager + // HTTP status codes to indicate success: http.StatusOK + NewListAllPager func(options *armnetwork.NatGatewaysClientListAllOptions) (resp azfake.PagerResponder[armnetwork.NatGatewaysClientListAllResponse]) + + // UpdateTags is the fake for method NatGatewaysClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, natGatewayName string, parameters armnetwork.TagsObject, options *armnetwork.NatGatewaysClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.NatGatewaysClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewNatGatewaysServerTransport creates a new instance of NatGatewaysServerTransport with the provided implementation. +// The returned NatGatewaysServerTransport instance is connected to an instance of armnetwork.NatGatewaysClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewNatGatewaysServerTransport(srv *NatGatewaysServer) *NatGatewaysServerTransport { + return &NatGatewaysServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.NatGatewaysClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.NatGatewaysClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.NatGatewaysClientListResponse]](), + newListAllPager: newTracker[azfake.PagerResponder[armnetwork.NatGatewaysClientListAllResponse]](), + } +} + +// NatGatewaysServerTransport connects instances of armnetwork.NatGatewaysClient to instances of NatGatewaysServer. +// Don't use this type directly, use NewNatGatewaysServerTransport instead. 
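Editorial aside, not part of the generated file above: the doc comments here describe the intended wiring, where a test fills in only the fake methods it needs, wraps the server in its *ServerTransport, and hands that transport to the real client through azcore.ClientOptions.Transporter. The sketch below illustrates that flow for NatGatewaysServer under stated assumptions: azfake.TokenCredential and Responder.SetResponse are assumed to behave as documented for the azcore fake package, and names such as the test package, resource group and gateway values are placeholders.

// Illustrative test-side wiring only; assumptions as noted in the paragraph above.
package fakes_test

import (
	"context"
	"net/http"
	"testing"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func TestNatGatewayGetViaFake(t *testing.T) {
	srv := fake.NatGatewaysServer{
		// Only the methods a test exercises need to be populated; unimplemented
		// methods return the "not implemented" nonRetriableError seen in the
		// generated dispatchers.
		Get: func(ctx context.Context, resourceGroupName, natGatewayName string,
			options *armnetwork.NatGatewaysClientGetOptions) (
			resp azfake.Responder[armnetwork.NatGatewaysClientGetResponse], errResp azfake.ErrorResponder) {
			resp.SetResponse(http.StatusOK, armnetwork.NatGatewaysClientGetResponse{
				NatGateway: armnetwork.NatGateway{Name: to.Ptr(natGatewayName)},
			}, nil)
			return
		},
	}
	client, err := armnetwork.NewNatGatewaysClient("00000000-0000-0000-0000-000000000000",
		&azfake.TokenCredential{}, &arm.ClientOptions{
			ClientOptions: azcore.ClientOptions{
				// Route all client traffic through the fake transport defined below.
				Transporter: fake.NewNatGatewaysServerTransport(&srv),
			},
		})
	if err != nil {
		t.Fatal(err)
	}
	got, err := client.Get(context.Background(), "rg", "natgw", nil)
	if err != nil || got.Name == nil || *got.Name != "natgw" {
		t.Fatalf("unexpected result: %v %v", got, err)
	}
}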
+type NatGatewaysServerTransport struct { + srv *NatGatewaysServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.NatGatewaysClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.NatGatewaysClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.NatGatewaysClientListResponse]] + newListAllPager *tracker[azfake.PagerResponder[armnetwork.NatGatewaysClientListAllResponse]] +} + +// Do implements the policy.Transporter interface for NatGatewaysServerTransport. +func (n *NatGatewaysServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "NatGatewaysClient.BeginCreateOrUpdate": + resp, err = n.dispatchBeginCreateOrUpdate(req) + case "NatGatewaysClient.BeginDelete": + resp, err = n.dispatchBeginDelete(req) + case "NatGatewaysClient.Get": + resp, err = n.dispatchGet(req) + case "NatGatewaysClient.NewListPager": + resp, err = n.dispatchNewListPager(req) + case "NatGatewaysClient.NewListAllPager": + resp, err = n.dispatchNewListAllPager(req) + case "NatGatewaysClient.UpdateTags": + resp, err = n.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (n *NatGatewaysServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if n.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := n.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/natGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.NatGateway](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + natGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("natGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := n.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, natGatewayNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + n.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated, http.StatusAccepted}, resp.StatusCode) { + n.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + n.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (n *NatGatewaysServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if n.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := n.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/natGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + natGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("natGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := n.srv.BeginDelete(req.Context(), resourceGroupNameParam, natGatewayNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + n.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + n.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + n.beginDelete.remove(req) + } + + return resp, nil +} + +func (n *NatGatewaysServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if n.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/natGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + natGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("natGatewayName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.NatGatewaysClientGetOptions + if expandParam != nil { + options = &armnetwork.NatGatewaysClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := n.srv.Get(req.Context(), resourceGroupNameParam, natGatewayNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected 
status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).NatGateway, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (n *NatGatewaysServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if n.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := n.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/natGateways` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := n.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + n.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.NatGatewaysClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + n.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + n.newListPager.remove(req) + } + return resp, nil +} + +func (n *NatGatewaysServerTransport) dispatchNewListAllPager(req *http.Request) (*http.Response, error) { + if n.srv.NewListAllPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAllPager not implemented")} + } + newListAllPager := n.newListAllPager.get(req) + if newListAllPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/natGateways` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := n.srv.NewListAllPager(nil) + newListAllPager = &resp + n.newListAllPager.add(req, newListAllPager) + server.PagerResponderInjectNextLinks(newListAllPager, req, func(page *armnetwork.NatGatewaysClientListAllResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAllPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + n.newListAllPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAllPager) { + n.newListAllPager.remove(req) + } + return resp, nil +} + +func (n *NatGatewaysServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if n.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/natGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + natGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("natGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := n.srv.UpdateTags(req.Context(), resourceGroupNameParam, natGatewayNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).NatGateway, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/natrules_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/natrules_server.go new file mode 100644 index 00000000000..b5784c90ffe --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/natrules_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// NatRulesServer is a fake server for instances of the armnetwork.NatRulesClient type. 
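Aside, not part of the vendored file: each dispatcher above passes its second return value to server.GetError, so a test can force an error path for any fake method by populating the azfake.ErrorResponder instead of the Responder. A minimal sketch follows, assuming ErrorResponder.SetResponseError is available as in the azcore fake package; imports match the NatGateways wiring sketch earlier, and the helper name is hypothetical.

// Hypothetical test helper: a NatRules fake whose Get always reports 404.
func natRuleNotFoundFake() *fake.NatRulesServerTransport {
	srv := fake.NatRulesServer{
		Get: func(ctx context.Context, resourceGroupName, gatewayName, natRuleName string,
			options *armnetwork.NatRulesClientGetOptions) (
			resp azfake.Responder[armnetwork.NatRulesClientGetResponse], errResp azfake.ErrorResponder) {
			// Leave resp empty and report a synthetic ARM error instead; a client
			// built over this transport should surface an *azcore.ResponseError.
			errResp.SetResponseError(http.StatusNotFound, "ResourceNotFound")
			return
		},
	}
	return fake.NewNatRulesServerTransport(&srv)
}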
+type NatRulesServer struct { + // BeginCreateOrUpdate is the fake for method NatRulesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, gatewayName string, natRuleName string, natRuleParameters armnetwork.VPNGatewayNatRule, options *armnetwork.NatRulesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.NatRulesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method NatRulesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, gatewayName string, natRuleName string, options *armnetwork.NatRulesClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.NatRulesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method NatRulesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, gatewayName string, natRuleName string, options *armnetwork.NatRulesClientGetOptions) (resp azfake.Responder[armnetwork.NatRulesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListByVPNGatewayPager is the fake for method NatRulesClient.NewListByVPNGatewayPager + // HTTP status codes to indicate success: http.StatusOK + NewListByVPNGatewayPager func(resourceGroupName string, gatewayName string, options *armnetwork.NatRulesClientListByVPNGatewayOptions) (resp azfake.PagerResponder[armnetwork.NatRulesClientListByVPNGatewayResponse]) +} + +// NewNatRulesServerTransport creates a new instance of NatRulesServerTransport with the provided implementation. +// The returned NatRulesServerTransport instance is connected to an instance of armnetwork.NatRulesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewNatRulesServerTransport(srv *NatRulesServer) *NatRulesServerTransport { + return &NatRulesServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.NatRulesClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.NatRulesClientDeleteResponse]](), + newListByVPNGatewayPager: newTracker[azfake.PagerResponder[armnetwork.NatRulesClientListByVPNGatewayResponse]](), + } +} + +// NatRulesServerTransport connects instances of armnetwork.NatRulesClient to instances of NatRulesServer. +// Don't use this type directly, use NewNatRulesServerTransport instead. +type NatRulesServerTransport struct { + srv *NatRulesServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.NatRulesClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.NatRulesClientDeleteResponse]] + newListByVPNGatewayPager *tracker[azfake.PagerResponder[armnetwork.NatRulesClientListByVPNGatewayResponse]] +} + +// Do implements the policy.Transporter interface for NatRulesServerTransport. 
+func (n *NatRulesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "NatRulesClient.BeginCreateOrUpdate": + resp, err = n.dispatchBeginCreateOrUpdate(req) + case "NatRulesClient.BeginDelete": + resp, err = n.dispatchBeginDelete(req) + case "NatRulesClient.Get": + resp, err = n.dispatchGet(req) + case "NatRulesClient.NewListByVPNGatewayPager": + resp, err = n.dispatchNewListByVPNGatewayPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (n *NatRulesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if n.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := n.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/natRules/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VPNGatewayNatRule](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + natRuleNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("natRuleName")]) + if err != nil { + return nil, err + } + respr, errRespr := n.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, gatewayNameParam, natRuleNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + n.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + n.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + n.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (n *NatRulesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if n.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := n.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/natRules/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + natRuleNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("natRuleName")]) + if err != nil { + return nil, err + } + respr, errRespr := n.srv.BeginDelete(req.Context(), resourceGroupNameParam, gatewayNameParam, natRuleNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + n.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + n.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + n.beginDelete.remove(req) + } + + return resp, nil +} + +func (n *NatRulesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if n.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/natRules/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + natRuleNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("natRuleName")]) + if err != nil { + return nil, err + } + respr, errRespr := n.srv.Get(req.Context(), resourceGroupNameParam, gatewayNameParam, natRuleNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VPNGatewayNatRule, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (n *NatRulesServerTransport) dispatchNewListByVPNGatewayPager(req *http.Request) (*http.Response, error) { + if n.srv.NewListByVPNGatewayPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByVPNGatewayPager not implemented")} + } + newListByVPNGatewayPager := n.newListByVPNGatewayPager.get(req) + if newListByVPNGatewayPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/natRules` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + resp := n.srv.NewListByVPNGatewayPager(resourceGroupNameParam, gatewayNameParam, nil) + newListByVPNGatewayPager = &resp + n.newListByVPNGatewayPager.add(req, newListByVPNGatewayPager) + server.PagerResponderInjectNextLinks(newListByVPNGatewayPager, req, func(page *armnetwork.NatRulesClientListByVPNGatewayResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByVPNGatewayPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + n.newListByVPNGatewayPager.remove(req) + return nil, 
&nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByVPNGatewayPager) { + n.newListByVPNGatewayPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/operations_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/operations_server.go new file mode 100644 index 00000000000..a526221a47d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/operations_server.go @@ -0,0 +1,96 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" +) + +// OperationsServer is a fake server for instances of the armnetwork.OperationsClient type. +type OperationsServer struct { + // NewListPager is the fake for method OperationsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armnetwork.OperationsClientListOptions) (resp azfake.PagerResponder[armnetwork.OperationsClientListResponse]) +} + +// NewOperationsServerTransport creates a new instance of OperationsServerTransport with the provided implementation. +// The returned OperationsServerTransport instance is connected to an instance of armnetwork.OperationsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewOperationsServerTransport(srv *OperationsServer) *OperationsServerTransport { + return &OperationsServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.OperationsClientListResponse]](), + } +} + +// OperationsServerTransport connects instances of armnetwork.OperationsClient to instances of OperationsServer. +// Don't use this type directly, use NewOperationsServerTransport instead. +type OperationsServerTransport struct { + srv *OperationsServer + newListPager *tracker[azfake.PagerResponder[armnetwork.OperationsClientListResponse]] +} + +// Do implements the policy.Transporter interface for OperationsServerTransport. 
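Aside, not part of the vendored file: for pager methods such as OperationsServer.NewListPager, the generated dispatcher stores the PagerResponder in a per-request tracker, injects NextLink values, and serves one page per request until the responder is drained. From the test side, pages are queued up front; the sketch below assumes azfake.PagerResponder.AddPage behaves as in the azcore fake package, uses the imports from the earlier wiring sketch, and the helper name is hypothetical.

// Hypothetical test helper: an Operations fake whose list pager yields two pages.
func operationsTwoPageFake() *fake.OperationsServerTransport {
	srv := fake.OperationsServer{
		NewListPager: func(options *armnetwork.OperationsClientListOptions) (
			resp azfake.PagerResponder[armnetwork.OperationsClientListResponse]) {
			// Two empty pages are enough to exercise paging; a real test would
			// populate the embedded list result on each page.
			resp.AddPage(http.StatusOK, armnetwork.OperationsClientListResponse{}, nil)
			resp.AddPage(http.StatusOK, armnetwork.OperationsClientListResponse{}, nil)
			return
		},
	}
	return fake.NewOperationsServerTransport(&srv)
}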
+func (o *OperationsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "OperationsClient.NewListPager": + resp, err = o.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (o *OperationsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if o.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := o.newListPager.get(req) + if newListPager == nil { + resp := o.srv.NewListPager(nil) + newListPager = &resp + o.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.OperationsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + o.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + o.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/p2svpngateways_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/p2svpngateways_server.go new file mode 100644 index 00000000000..569729e7dda --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/p2svpngateways_server.go @@ -0,0 +1,625 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// P2SVPNGatewaysServer is a fake server for instances of the armnetwork.P2SVPNGatewaysClient type. 
+type P2SVPNGatewaysServer struct { + // BeginCreateOrUpdate is the fake for method P2SVPNGatewaysClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, gatewayName string, p2SVPNGatewayParameters armnetwork.P2SVPNGateway, options *armnetwork.P2SVPNGatewaysClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method P2SVPNGatewaysClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, gatewayName string, options *armnetwork.P2SVPNGatewaysClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientDeleteResponse], errResp azfake.ErrorResponder) + + // BeginDisconnectP2SVPNConnections is the fake for method P2SVPNGatewaysClient.BeginDisconnectP2SVPNConnections + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginDisconnectP2SVPNConnections func(ctx context.Context, resourceGroupName string, p2SVPNGatewayName string, request armnetwork.P2SVPNConnectionRequest, options *armnetwork.P2SVPNGatewaysClientBeginDisconnectP2SVPNConnectionsOptions) (resp azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientDisconnectP2SVPNConnectionsResponse], errResp azfake.ErrorResponder) + + // BeginGenerateVPNProfile is the fake for method P2SVPNGatewaysClient.BeginGenerateVPNProfile + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGenerateVPNProfile func(ctx context.Context, resourceGroupName string, gatewayName string, parameters armnetwork.P2SVPNProfileParameters, options *armnetwork.P2SVPNGatewaysClientBeginGenerateVPNProfileOptions) (resp azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientGenerateVPNProfileResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method P2SVPNGatewaysClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, gatewayName string, options *armnetwork.P2SVPNGatewaysClientGetOptions) (resp azfake.Responder[armnetwork.P2SVPNGatewaysClientGetResponse], errResp azfake.ErrorResponder) + + // BeginGetP2SVPNConnectionHealth is the fake for method P2SVPNGatewaysClient.BeginGetP2SVPNConnectionHealth + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGetP2SVPNConnectionHealth func(ctx context.Context, resourceGroupName string, gatewayName string, options *armnetwork.P2SVPNGatewaysClientBeginGetP2SVPNConnectionHealthOptions) (resp azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientGetP2SVPNConnectionHealthResponse], errResp azfake.ErrorResponder) + + // BeginGetP2SVPNConnectionHealthDetailed is the fake for method P2SVPNGatewaysClient.BeginGetP2SVPNConnectionHealthDetailed + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGetP2SVPNConnectionHealthDetailed func(ctx context.Context, resourceGroupName string, gatewayName string, request armnetwork.P2SVPNConnectionHealthRequest, options *armnetwork.P2SVPNGatewaysClientBeginGetP2SVPNConnectionHealthDetailedOptions) (resp azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientGetP2SVPNConnectionHealthDetailedResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method 
P2SVPNGatewaysClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armnetwork.P2SVPNGatewaysClientListOptions) (resp azfake.PagerResponder[armnetwork.P2SVPNGatewaysClientListResponse]) + + // NewListByResourceGroupPager is the fake for method P2SVPNGatewaysClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armnetwork.P2SVPNGatewaysClientListByResourceGroupOptions) (resp azfake.PagerResponder[armnetwork.P2SVPNGatewaysClientListByResourceGroupResponse]) + + // BeginReset is the fake for method P2SVPNGatewaysClient.BeginReset + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginReset func(ctx context.Context, resourceGroupName string, gatewayName string, options *armnetwork.P2SVPNGatewaysClientBeginResetOptions) (resp azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientResetResponse], errResp azfake.ErrorResponder) + + // BeginUpdateTags is the fake for method P2SVPNGatewaysClient.BeginUpdateTags + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginUpdateTags func(ctx context.Context, resourceGroupName string, gatewayName string, p2SVPNGatewayParameters armnetwork.TagsObject, options *armnetwork.P2SVPNGatewaysClientBeginUpdateTagsOptions) (resp azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewP2SVPNGatewaysServerTransport creates a new instance of P2SVPNGatewaysServerTransport with the provided implementation. +// The returned P2SVPNGatewaysServerTransport instance is connected to an instance of armnetwork.P2SVPNGatewaysClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewP2SVPNGatewaysServerTransport(srv *P2SVPNGatewaysServer) *P2SVPNGatewaysServerTransport { + return &P2SVPNGatewaysServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientDeleteResponse]](), + beginDisconnectP2SVPNConnections: newTracker[azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientDisconnectP2SVPNConnectionsResponse]](), + beginGenerateVPNProfile: newTracker[azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientGenerateVPNProfileResponse]](), + beginGetP2SVPNConnectionHealth: newTracker[azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientGetP2SVPNConnectionHealthResponse]](), + beginGetP2SVPNConnectionHealthDetailed: newTracker[azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientGetP2SVPNConnectionHealthDetailedResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.P2SVPNGatewaysClientListResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armnetwork.P2SVPNGatewaysClientListByResourceGroupResponse]](), + beginReset: newTracker[azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientResetResponse]](), + beginUpdateTags: newTracker[azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientUpdateTagsResponse]](), + } +} + +// P2SVPNGatewaysServerTransport connects instances of armnetwork.P2SVPNGatewaysClient to instances of P2SVPNGatewaysServer. +// Don't use this type directly, use NewP2SVPNGatewaysServerTransport instead. 
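Aside, not part of the vendored file: the Begin* methods above are long-running operations, so their dispatchers replay one queued response per poll via the PollerResponder trackers declared in the transport below. A hedged sketch of how a test might queue an in-progress poll followed by a terminal result, assuming azfake.PollerResponder.AddNonTerminalResponse and SetTerminalResponse exist as in the azcore fake package (imports as in the earlier wiring sketch; helper name hypothetical):

// Hypothetical test helper: a P2S VPN gateway fake whose create LRO polls once.
func p2sGatewayLROFake() *fake.P2SVPNGatewaysServerTransport {
	srv := fake.P2SVPNGatewaysServer{
		BeginCreateOrUpdate: func(ctx context.Context, resourceGroupName, gatewayName string,
			p2SVPNGatewayParameters armnetwork.P2SVPNGateway,
			options *armnetwork.P2SVPNGatewaysClientBeginCreateOrUpdateOptions) (
			resp azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) {
			// One in-progress 201 poll, then a terminal 200; a real test would
			// fill in the embedded P2SVPNGateway on the terminal response.
			resp.AddNonTerminalResponse(http.StatusCreated, nil)
			resp.SetTerminalResponse(http.StatusOK, armnetwork.P2SVPNGatewaysClientCreateOrUpdateResponse{}, nil)
			return
		},
	}
	return fake.NewP2SVPNGatewaysServerTransport(&srv)
}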
+type P2SVPNGatewaysServerTransport struct { + srv *P2SVPNGatewaysServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientDeleteResponse]] + beginDisconnectP2SVPNConnections *tracker[azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientDisconnectP2SVPNConnectionsResponse]] + beginGenerateVPNProfile *tracker[azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientGenerateVPNProfileResponse]] + beginGetP2SVPNConnectionHealth *tracker[azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientGetP2SVPNConnectionHealthResponse]] + beginGetP2SVPNConnectionHealthDetailed *tracker[azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientGetP2SVPNConnectionHealthDetailedResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.P2SVPNGatewaysClientListResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armnetwork.P2SVPNGatewaysClientListByResourceGroupResponse]] + beginReset *tracker[azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientResetResponse]] + beginUpdateTags *tracker[azfake.PollerResponder[armnetwork.P2SVPNGatewaysClientUpdateTagsResponse]] +} + +// Do implements the policy.Transporter interface for P2SVPNGatewaysServerTransport. +func (p *P2SVPNGatewaysServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "P2SVPNGatewaysClient.BeginCreateOrUpdate": + resp, err = p.dispatchBeginCreateOrUpdate(req) + case "P2SVPNGatewaysClient.BeginDelete": + resp, err = p.dispatchBeginDelete(req) + case "P2SVPNGatewaysClient.BeginDisconnectP2SVPNConnections": + resp, err = p.dispatchBeginDisconnectP2SVPNConnections(req) + case "P2SVPNGatewaysClient.BeginGenerateVPNProfile": + resp, err = p.dispatchBeginGenerateVPNProfile(req) + case "P2SVPNGatewaysClient.Get": + resp, err = p.dispatchGet(req) + case "P2SVPNGatewaysClient.BeginGetP2SVPNConnectionHealth": + resp, err = p.dispatchBeginGetP2SVPNConnectionHealth(req) + case "P2SVPNGatewaysClient.BeginGetP2SVPNConnectionHealthDetailed": + resp, err = p.dispatchBeginGetP2SVPNConnectionHealthDetailed(req) + case "P2SVPNGatewaysClient.NewListPager": + resp, err = p.dispatchNewListPager(req) + case "P2SVPNGatewaysClient.NewListByResourceGroupPager": + resp, err = p.dispatchNewListByResourceGroupPager(req) + case "P2SVPNGatewaysClient.BeginReset": + resp, err = p.dispatchBeginReset(req) + case "P2SVPNGatewaysClient.BeginUpdateTags": + resp, err = p.dispatchBeginUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (p *P2SVPNGatewaysServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if p.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := p.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/p2svpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := 
regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.P2SVPNGateway](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, gatewayNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + p.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + p.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + p.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (p *P2SVPNGatewaysServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if p.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := p.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/p2svpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginDelete(req.Context(), resourceGroupNameParam, gatewayNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + p.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + p.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + p.beginDelete.remove(req) + } + + return resp, nil +} + +func (p *P2SVPNGatewaysServerTransport) dispatchBeginDisconnectP2SVPNConnections(req *http.Request) (*http.Response, error) { + if p.srv.BeginDisconnectP2SVPNConnections == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDisconnectP2SVPNConnections not implemented")} + } + beginDisconnectP2SVPNConnections := p.beginDisconnectP2SVPNConnections.get(req) + if beginDisconnectP2SVPNConnections == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/p2svpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/disconnectP2sVpnConnections` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.P2SVPNConnectionRequest](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + p2SVPNGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("p2sVpnGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginDisconnectP2SVPNConnections(req.Context(), resourceGroupNameParam, p2SVPNGatewayNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDisconnectP2SVPNConnections = &respr + p.beginDisconnectP2SVPNConnections.add(req, beginDisconnectP2SVPNConnections) + } + + resp, err := server.PollerResponderNext(beginDisconnectP2SVPNConnections, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + p.beginDisconnectP2SVPNConnections.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDisconnectP2SVPNConnections) { + p.beginDisconnectP2SVPNConnections.remove(req) + } + + return resp, nil +} + +func (p *P2SVPNGatewaysServerTransport) dispatchBeginGenerateVPNProfile(req *http.Request) (*http.Response, error) { + if p.srv.BeginGenerateVPNProfile == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGenerateVPNProfile not implemented")} + } + beginGenerateVPNProfile := p.beginGenerateVPNProfile.get(req) + if beginGenerateVPNProfile == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/p2svpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/generatevpnprofile` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.P2SVPNProfileParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginGenerateVPNProfile(req.Context(), resourceGroupNameParam, gatewayNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGenerateVPNProfile = &respr + p.beginGenerateVPNProfile.add(req, beginGenerateVPNProfile) + } + + resp, err := server.PollerResponderNext(beginGenerateVPNProfile, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + p.beginGenerateVPNProfile.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGenerateVPNProfile) { + p.beginGenerateVPNProfile.remove(req) + } + + return resp, nil +} + +func (p *P2SVPNGatewaysServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if p.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/p2svpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.Get(req.Context(), resourceGroupNameParam, gatewayNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).P2SVPNGateway, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (p *P2SVPNGatewaysServerTransport) dispatchBeginGetP2SVPNConnectionHealth(req *http.Request) (*http.Response, error) { + if p.srv.BeginGetP2SVPNConnectionHealth == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGetP2SVPNConnectionHealth not implemented")} + } + beginGetP2SVPNConnectionHealth := p.beginGetP2SVPNConnectionHealth.get(req) + if beginGetP2SVPNConnectionHealth == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/p2svpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/getP2sVpnConnectionHealth` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginGetP2SVPNConnectionHealth(req.Context(), resourceGroupNameParam, gatewayNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGetP2SVPNConnectionHealth = &respr + p.beginGetP2SVPNConnectionHealth.add(req, beginGetP2SVPNConnectionHealth) + } + + resp, err := server.PollerResponderNext(beginGetP2SVPNConnectionHealth, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + p.beginGetP2SVPNConnectionHealth.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGetP2SVPNConnectionHealth) { + p.beginGetP2SVPNConnectionHealth.remove(req) + } + + return resp, nil +} + +func (p *P2SVPNGatewaysServerTransport) dispatchBeginGetP2SVPNConnectionHealthDetailed(req *http.Request) (*http.Response, error) { + if p.srv.BeginGetP2SVPNConnectionHealthDetailed == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGetP2SVPNConnectionHealthDetailed not implemented")} + } + beginGetP2SVPNConnectionHealthDetailed := p.beginGetP2SVPNConnectionHealthDetailed.get(req) + if beginGetP2SVPNConnectionHealthDetailed == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/p2svpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/getP2sVpnConnectionHealthDetailed` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.P2SVPNConnectionHealthRequest](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginGetP2SVPNConnectionHealthDetailed(req.Context(), resourceGroupNameParam, gatewayNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGetP2SVPNConnectionHealthDetailed = &respr + p.beginGetP2SVPNConnectionHealthDetailed.add(req, beginGetP2SVPNConnectionHealthDetailed) + } + + resp, err := server.PollerResponderNext(beginGetP2SVPNConnectionHealthDetailed, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + p.beginGetP2SVPNConnectionHealthDetailed.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGetP2SVPNConnectionHealthDetailed) { + p.beginGetP2SVPNConnectionHealthDetailed.remove(req) + } + + return resp, nil +} + +func (p *P2SVPNGatewaysServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if p.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := p.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/p2svpnGateways` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := p.srv.NewListPager(nil) + newListPager = &resp + p.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.P2SVPNGatewaysClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + p.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + p.newListPager.remove(req) + } + return resp, nil +} + +func (p *P2SVPNGatewaysServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if p.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := p.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/p2svpnGateways` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := p.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + p.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armnetwork.P2SVPNGatewaysClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + p.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + p.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (p *P2SVPNGatewaysServerTransport) dispatchBeginReset(req *http.Request) (*http.Response, error) { + if p.srv.BeginReset == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginReset not implemented")} + } + beginReset := p.beginReset.get(req) + if beginReset == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/p2svpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/reset` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginReset(req.Context(), resourceGroupNameParam, gatewayNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginReset = &respr + p.beginReset.add(req, beginReset) + } + + resp, err := server.PollerResponderNext(beginReset, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + p.beginReset.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginReset) { + p.beginReset.remove(req) + } + + return resp, nil +} + +func (p *P2SVPNGatewaysServerTransport) dispatchBeginUpdateTags(req *http.Request) (*http.Response, error) { + if p.srv.BeginUpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdateTags not implemented")} + } + beginUpdateTags := p.beginUpdateTags.get(req) + if beginUpdateTags == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/p2svpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginUpdateTags(req.Context(), resourceGroupNameParam, gatewayNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdateTags = &respr + p.beginUpdateTags.add(req, beginUpdateTags) + } + + resp, err := server.PollerResponderNext(beginUpdateTags, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + p.beginUpdateTags.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected 
status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdateTags) { + p.beginUpdateTags.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/packetcaptures_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/packetcaptures_server.go new file mode 100644 index 00000000000..fae214a2656 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/packetcaptures_server.go @@ -0,0 +1,380 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// PacketCapturesServer is a fake server for instances of the armnetwork.PacketCapturesClient type. +type PacketCapturesServer struct { + // BeginCreate is the fake for method PacketCapturesClient.BeginCreate + // HTTP status codes to indicate success: http.StatusCreated + BeginCreate func(ctx context.Context, resourceGroupName string, networkWatcherName string, packetCaptureName string, parameters armnetwork.PacketCapture, options *armnetwork.PacketCapturesClientBeginCreateOptions) (resp azfake.PollerResponder[armnetwork.PacketCapturesClientCreateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method PacketCapturesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, networkWatcherName string, packetCaptureName string, options *armnetwork.PacketCapturesClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.PacketCapturesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method PacketCapturesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, networkWatcherName string, packetCaptureName string, options *armnetwork.PacketCapturesClientGetOptions) (resp azfake.Responder[armnetwork.PacketCapturesClientGetResponse], errResp azfake.ErrorResponder) + + // BeginGetStatus is the fake for method PacketCapturesClient.BeginGetStatus + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGetStatus func(ctx context.Context, resourceGroupName string, networkWatcherName string, packetCaptureName string, options *armnetwork.PacketCapturesClientBeginGetStatusOptions) (resp azfake.PollerResponder[armnetwork.PacketCapturesClientGetStatusResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method PacketCapturesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, networkWatcherName string, options *armnetwork.PacketCapturesClientListOptions) (resp 
azfake.PagerResponder[armnetwork.PacketCapturesClientListResponse]) + + // BeginStop is the fake for method PacketCapturesClient.BeginStop + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginStop func(ctx context.Context, resourceGroupName string, networkWatcherName string, packetCaptureName string, options *armnetwork.PacketCapturesClientBeginStopOptions) (resp azfake.PollerResponder[armnetwork.PacketCapturesClientStopResponse], errResp azfake.ErrorResponder) +} + +// NewPacketCapturesServerTransport creates a new instance of PacketCapturesServerTransport with the provided implementation. +// The returned PacketCapturesServerTransport instance is connected to an instance of armnetwork.PacketCapturesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewPacketCapturesServerTransport(srv *PacketCapturesServer) *PacketCapturesServerTransport { + return &PacketCapturesServerTransport{ + srv: srv, + beginCreate: newTracker[azfake.PollerResponder[armnetwork.PacketCapturesClientCreateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.PacketCapturesClientDeleteResponse]](), + beginGetStatus: newTracker[azfake.PollerResponder[armnetwork.PacketCapturesClientGetStatusResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.PacketCapturesClientListResponse]](), + beginStop: newTracker[azfake.PollerResponder[armnetwork.PacketCapturesClientStopResponse]](), + } +} + +// PacketCapturesServerTransport connects instances of armnetwork.PacketCapturesClient to instances of PacketCapturesServer. +// Don't use this type directly, use NewPacketCapturesServerTransport instead. +type PacketCapturesServerTransport struct { + srv *PacketCapturesServer + beginCreate *tracker[azfake.PollerResponder[armnetwork.PacketCapturesClientCreateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.PacketCapturesClientDeleteResponse]] + beginGetStatus *tracker[azfake.PollerResponder[armnetwork.PacketCapturesClientGetStatusResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.PacketCapturesClientListResponse]] + beginStop *tracker[azfake.PollerResponder[armnetwork.PacketCapturesClientStopResponse]] +} + +// Do implements the policy.Transporter interface for PacketCapturesServerTransport. 
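The transport only routes requests by API name; the behaviour comes from whatever a test assigns to the server struct. A minimal sketch of wiring PacketCapturesServer into the real client, assuming the azcore fake helpers (azfake.TokenCredential, Responder.SetResponse) and made-up subscription/resource names:

package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func main() {
	// Fake only the method under test; anything left nil fails with the
	// "fake for method ... not implemented" error seen in the dispatchers.
	srv := fake.PacketCapturesServer{
		Get: func(ctx context.Context, resourceGroupName, networkWatcherName, packetCaptureName string,
			options *armnetwork.PacketCapturesClientGetOptions) (
			resp azfake.Responder[armnetwork.PacketCapturesClientGetResponse], errResp azfake.ErrorResponder) {
			resp.SetResponse(http.StatusOK, armnetwork.PacketCapturesClientGetResponse{
				PacketCaptureResult: armnetwork.PacketCaptureResult{Name: to.Ptr(packetCaptureName)},
			}, nil)
			return
		},
	}

	// Route the client through the fake transport instead of ARM.
	clientFactory, err := armnetwork.NewClientFactory("subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transporter: fake.NewPacketCapturesServerTransport(&srv)},
	})
	if err != nil {
		log.Fatal(err)
	}

	res, err := clientFactory.NewPacketCapturesClient().Get(context.Background(), "rg", "watcher", "pc1", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*res.Name) // "pc1"
}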
+func (p *PacketCapturesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "PacketCapturesClient.BeginCreate": + resp, err = p.dispatchBeginCreate(req) + case "PacketCapturesClient.BeginDelete": + resp, err = p.dispatchBeginDelete(req) + case "PacketCapturesClient.Get": + resp, err = p.dispatchGet(req) + case "PacketCapturesClient.BeginGetStatus": + resp, err = p.dispatchBeginGetStatus(req) + case "PacketCapturesClient.NewListPager": + resp, err = p.dispatchNewListPager(req) + case "PacketCapturesClient.BeginStop": + resp, err = p.dispatchBeginStop(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (p *PacketCapturesServerTransport) dispatchBeginCreate(req *http.Request) (*http.Response, error) { + if p.srv.BeginCreate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreate not implemented")} + } + beginCreate := p.beginCreate.get(req) + if beginCreate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/packetCaptures/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.PacketCapture](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + packetCaptureNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("packetCaptureName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginCreate(req.Context(), resourceGroupNameParam, networkWatcherNameParam, packetCaptureNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreate = &respr + p.beginCreate.add(req, beginCreate) + } + + resp, err := server.PollerResponderNext(beginCreate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusCreated}, resp.StatusCode) { + p.beginCreate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreate) { + p.beginCreate.remove(req) + } + + return resp, nil +} + +func (p *PacketCapturesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if p.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := p.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/packetCaptures/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + packetCaptureNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("packetCaptureName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginDelete(req.Context(), resourceGroupNameParam, networkWatcherNameParam, packetCaptureNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + p.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + p.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + p.beginDelete.remove(req) + } + + return resp, nil +} + +func (p *PacketCapturesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if p.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/packetCaptures/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + packetCaptureNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("packetCaptureName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.Get(req.Context(), resourceGroupNameParam, networkWatcherNameParam, packetCaptureNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).PacketCaptureResult, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (p *PacketCapturesServerTransport) dispatchBeginGetStatus(req *http.Request) (*http.Response, error) { + if p.srv.BeginGetStatus == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGetStatus not implemented")} + } + beginGetStatus := p.beginGetStatus.get(req) + if beginGetStatus == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/packetCaptures/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/queryStatus` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + packetCaptureNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("packetCaptureName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginGetStatus(req.Context(), resourceGroupNameParam, networkWatcherNameParam, packetCaptureNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGetStatus = &respr + p.beginGetStatus.add(req, beginGetStatus) + } + + resp, err := server.PollerResponderNext(beginGetStatus, req) + if err != nil { + return nil, err + } + + if 
!contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + p.beginGetStatus.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGetStatus) { + p.beginGetStatus.remove(req) + } + + return resp, nil +} + +func (p *PacketCapturesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if p.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := p.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/packetCaptures` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + resp := p.srv.NewListPager(resourceGroupNameParam, networkWatcherNameParam, nil) + newListPager = &resp + p.newListPager.add(req, newListPager) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + p.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + p.newListPager.remove(req) + } + return resp, nil +} + +func (p *PacketCapturesServerTransport) dispatchBeginStop(req *http.Request) (*http.Response, error) { + if p.srv.BeginStop == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginStop not implemented")} + } + beginStop := p.beginStop.get(req) + if beginStop == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/packetCaptures/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/stop` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + packetCaptureNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("packetCaptureName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginStop(req.Context(), resourceGroupNameParam, networkWatcherNameParam, packetCaptureNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginStop = &respr + p.beginStop.add(req, beginStop) + } + + resp, err := server.PollerResponderNext(beginStop, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + p.beginStop.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginStop) { + p.beginStop.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/peerexpressroutecircuitconnections_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/peerexpressroutecircuitconnections_server.go new file mode 100644 index 00000000000..c5fcccc1dad --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/peerexpressroutecircuitconnections_server.go @@ -0,0 +1,164 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// PeerExpressRouteCircuitConnectionsServer is a fake server for instances of the armnetwork.PeerExpressRouteCircuitConnectionsClient type. 
+type PeerExpressRouteCircuitConnectionsServer struct { + // Get is the fake for method PeerExpressRouteCircuitConnectionsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, connectionName string, options *armnetwork.PeerExpressRouteCircuitConnectionsClientGetOptions) (resp azfake.Responder[armnetwork.PeerExpressRouteCircuitConnectionsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method PeerExpressRouteCircuitConnectionsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, circuitName string, peeringName string, options *armnetwork.PeerExpressRouteCircuitConnectionsClientListOptions) (resp azfake.PagerResponder[armnetwork.PeerExpressRouteCircuitConnectionsClientListResponse]) +} + +// NewPeerExpressRouteCircuitConnectionsServerTransport creates a new instance of PeerExpressRouteCircuitConnectionsServerTransport with the provided implementation. +// The returned PeerExpressRouteCircuitConnectionsServerTransport instance is connected to an instance of armnetwork.PeerExpressRouteCircuitConnectionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewPeerExpressRouteCircuitConnectionsServerTransport(srv *PeerExpressRouteCircuitConnectionsServer) *PeerExpressRouteCircuitConnectionsServerTransport { + return &PeerExpressRouteCircuitConnectionsServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.PeerExpressRouteCircuitConnectionsClientListResponse]](), + } +} + +// PeerExpressRouteCircuitConnectionsServerTransport connects instances of armnetwork.PeerExpressRouteCircuitConnectionsClient to instances of PeerExpressRouteCircuitConnectionsServer. +// Don't use this type directly, use NewPeerExpressRouteCircuitConnectionsServerTransport instead. +type PeerExpressRouteCircuitConnectionsServerTransport struct { + srv *PeerExpressRouteCircuitConnectionsServer + newListPager *tracker[azfake.PagerResponder[armnetwork.PeerExpressRouteCircuitConnectionsClientListResponse]] +} + +// Do implements the policy.Transporter interface for PeerExpressRouteCircuitConnectionsServerTransport. 
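Pager-style fakes work the same way, except the server method fills an azfake.PagerResponder and the transport injects nextLink values between pages. A sketch, assuming the azcore fake AddPage helper, the generated list-result field names, and illustrative resource names:

package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func main() {
	srv := fake.PeerExpressRouteCircuitConnectionsServer{
		NewListPager: func(resourceGroupName, circuitName, peeringName string,
			options *armnetwork.PeerExpressRouteCircuitConnectionsClientListOptions) (
			resp azfake.PagerResponder[armnetwork.PeerExpressRouteCircuitConnectionsClientListResponse]) {
			// Two one-item pages; dispatchNewListPager injects the nextLink between them.
			for _, name := range []string{"conn1", "conn2"} {
				resp.AddPage(http.StatusOK, armnetwork.PeerExpressRouteCircuitConnectionsClientListResponse{
					PeerExpressRouteCircuitConnectionListResult: armnetwork.PeerExpressRouteCircuitConnectionListResult{
						Value: []*armnetwork.PeerExpressRouteCircuitConnection{{Name: to.Ptr(name)}},
					},
				}, nil)
			}
			return
		},
	}

	clientFactory, err := armnetwork.NewClientFactory("subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transporter: fake.NewPeerExpressRouteCircuitConnectionsServerTransport(&srv)},
	})
	if err != nil {
		log.Fatal(err)
	}

	pager := clientFactory.NewPeerExpressRouteCircuitConnectionsClient().NewListPager("rg", "circuit", "peering", nil)
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		for _, c := range page.Value {
			fmt.Println(*c.Name)
		}
	}
}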
+func (p *PeerExpressRouteCircuitConnectionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "PeerExpressRouteCircuitConnectionsClient.Get": + resp, err = p.dispatchGet(req) + case "PeerExpressRouteCircuitConnectionsClient.NewListPager": + resp, err = p.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (p *PeerExpressRouteCircuitConnectionsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if p.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + circuitNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("circuitName")]) + if err != nil { + return nil, err + } + peeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peeringName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.Get(req.Context(), resourceGroupNameParam, circuitNameParam, peeringNameParam, connectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).PeerExpressRouteCircuitConnection, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (p *PeerExpressRouteCircuitConnectionsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if p.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := p.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/expressRouteCircuits/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerConnections` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + circuitNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("circuitName")]) + if err != nil { + return nil, err + } + peeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peeringName")]) + if err != nil { + return nil, err + } + resp := p.srv.NewListPager(resourceGroupNameParam, circuitNameParam, peeringNameParam, nil) + newListPager = &resp + p.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.PeerExpressRouteCircuitConnectionsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + p.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + p.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/polymorphic_helpers.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/polymorphic_helpers.go new file mode 100644 index 00000000000..866e9603b7b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/polymorphic_helpers.go @@ -0,0 +1,37 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "encoding/json" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" +) + +func unmarshalBaseAdminRuleClassification(rawMsg json.RawMessage) (armnetwork.BaseAdminRuleClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b armnetwork.BaseAdminRuleClassification + switch m["kind"] { + case string(armnetwork.AdminRuleKindCustom): + b = &armnetwork.AdminRule{} + case string(armnetwork.AdminRuleKindDefault): + b = &armnetwork.DefaultAdminRule{} + default: + b = &armnetwork.BaseAdminRule{} + } + if err := json.Unmarshal(rawMsg, b); err != nil { + return nil, err + } + return b, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/privatednszonegroups_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/privatednszonegroups_server.go new file mode 100644 index 00000000000..55e2b053a78 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/privatednszonegroups_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// PrivateDNSZoneGroupsServer is a fake server for instances of the armnetwork.PrivateDNSZoneGroupsClient type. 
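The unmarshalBaseAdminRuleClassification helper picks the concrete admin-rule type from its "kind" discriminator before unmarshalling. A small in-package test sketch (hypothetical file name, e.g. polymorphic_helpers_test.go) exercising that switch:

package fake

import (
	"encoding/json"
	"testing"

	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
)

// A payload whose "kind" matches AdminRuleKindCustom should decode into the
// concrete *armnetwork.AdminRule rather than the base type.
func TestUnmarshalBaseAdminRuleClassification(t *testing.T) {
	raw := json.RawMessage(`{"kind":"` + string(armnetwork.AdminRuleKindCustom) + `"}`)
	rule, err := unmarshalBaseAdminRuleClassification(raw)
	if err != nil {
		t.Fatal(err)
	}
	if _, ok := rule.(*armnetwork.AdminRule); !ok {
		t.Fatalf("expected *armnetwork.AdminRule, got %T", rule)
	}
}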
+type PrivateDNSZoneGroupsServer struct { + // BeginCreateOrUpdate is the fake for method PrivateDNSZoneGroupsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, privateEndpointName string, privateDNSZoneGroupName string, parameters armnetwork.PrivateDNSZoneGroup, options *armnetwork.PrivateDNSZoneGroupsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.PrivateDNSZoneGroupsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method PrivateDNSZoneGroupsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, privateEndpointName string, privateDNSZoneGroupName string, options *armnetwork.PrivateDNSZoneGroupsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.PrivateDNSZoneGroupsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method PrivateDNSZoneGroupsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, privateEndpointName string, privateDNSZoneGroupName string, options *armnetwork.PrivateDNSZoneGroupsClientGetOptions) (resp azfake.Responder[armnetwork.PrivateDNSZoneGroupsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method PrivateDNSZoneGroupsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(privateEndpointName string, resourceGroupName string, options *armnetwork.PrivateDNSZoneGroupsClientListOptions) (resp azfake.PagerResponder[armnetwork.PrivateDNSZoneGroupsClientListResponse]) +} + +// NewPrivateDNSZoneGroupsServerTransport creates a new instance of PrivateDNSZoneGroupsServerTransport with the provided implementation. +// The returned PrivateDNSZoneGroupsServerTransport instance is connected to an instance of armnetwork.PrivateDNSZoneGroupsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewPrivateDNSZoneGroupsServerTransport(srv *PrivateDNSZoneGroupsServer) *PrivateDNSZoneGroupsServerTransport { + return &PrivateDNSZoneGroupsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.PrivateDNSZoneGroupsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.PrivateDNSZoneGroupsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.PrivateDNSZoneGroupsClientListResponse]](), + } +} + +// PrivateDNSZoneGroupsServerTransport connects instances of armnetwork.PrivateDNSZoneGroupsClient to instances of PrivateDNSZoneGroupsServer. +// Don't use this type directly, use NewPrivateDNSZoneGroupsServerTransport instead. +type PrivateDNSZoneGroupsServerTransport struct { + srv *PrivateDNSZoneGroupsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.PrivateDNSZoneGroupsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.PrivateDNSZoneGroupsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.PrivateDNSZoneGroupsClientListResponse]] +} + +// Do implements the policy.Transporter interface for PrivateDNSZoneGroupsServerTransport. 
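Long-running operations are faked through azfake.PollerResponder: the tracker in the transport keeps the responder keyed to the request until the poller reports no more responses. A sketch assuming the AddNonTerminalResponse/SetTerminalResponse helpers and illustrative names:

package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func main() {
	srv := fake.PrivateDNSZoneGroupsServer{
		BeginCreateOrUpdate: func(ctx context.Context, resourceGroupName, privateEndpointName, privateDNSZoneGroupName string,
			parameters armnetwork.PrivateDNSZoneGroup, options *armnetwork.PrivateDNSZoneGroupsClientBeginCreateOrUpdateOptions) (
			resp azfake.PollerResponder[armnetwork.PrivateDNSZoneGroupsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) {
			// One in-flight poll, then the terminal payload.
			resp.AddNonTerminalResponse(http.StatusOK, nil)
			resp.SetTerminalResponse(http.StatusOK, armnetwork.PrivateDNSZoneGroupsClientCreateOrUpdateResponse{
				PrivateDNSZoneGroup: armnetwork.PrivateDNSZoneGroup{Name: to.Ptr(privateDNSZoneGroupName)},
			}, nil)
			return
		},
	}

	clientFactory, err := armnetwork.NewClientFactory("subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transporter: fake.NewPrivateDNSZoneGroupsServerTransport(&srv)},
	})
	if err != nil {
		log.Fatal(err)
	}

	poller, err := clientFactory.NewPrivateDNSZoneGroupsClient().BeginCreateOrUpdate(
		context.Background(), "rg", "pe", "zg", armnetwork.PrivateDNSZoneGroup{}, nil)
	if err != nil {
		log.Fatal(err)
	}
	res, err := poller.PollUntilDone(context.Background(), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*res.Name) // "zg"
}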
+func (p *PrivateDNSZoneGroupsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "PrivateDNSZoneGroupsClient.BeginCreateOrUpdate": + resp, err = p.dispatchBeginCreateOrUpdate(req) + case "PrivateDNSZoneGroupsClient.BeginDelete": + resp, err = p.dispatchBeginDelete(req) + case "PrivateDNSZoneGroupsClient.Get": + resp, err = p.dispatchGet(req) + case "PrivateDNSZoneGroupsClient.NewListPager": + resp, err = p.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (p *PrivateDNSZoneGroupsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if p.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := p.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/privateEndpoints/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/privateDnsZoneGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.PrivateDNSZoneGroup](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + privateEndpointNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("privateEndpointName")]) + if err != nil { + return nil, err + } + privateDNSZoneGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("privateDnsZoneGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, privateEndpointNameParam, privateDNSZoneGroupNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + p.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + p.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + p.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (p *PrivateDNSZoneGroupsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if p.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := p.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/privateEndpoints/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/privateDnsZoneGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + privateEndpointNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("privateEndpointName")]) + if err != nil { + return nil, err + } + privateDNSZoneGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("privateDnsZoneGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginDelete(req.Context(), resourceGroupNameParam, privateEndpointNameParam, privateDNSZoneGroupNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + p.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + p.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + p.beginDelete.remove(req) + } + + return resp, nil +} + +func (p *PrivateDNSZoneGroupsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if p.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/privateEndpoints/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/privateDnsZoneGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + privateEndpointNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("privateEndpointName")]) + if err != nil { + return nil, err + } + privateDNSZoneGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("privateDnsZoneGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.Get(req.Context(), resourceGroupNameParam, privateEndpointNameParam, privateDNSZoneGroupNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).PrivateDNSZoneGroup, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (p *PrivateDNSZoneGroupsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if p.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := p.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/privateEndpoints/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/privateDnsZoneGroups` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + privateEndpointNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("privateEndpointName")]) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := p.srv.NewListPager(privateEndpointNameParam, resourceGroupNameParam, nil) + newListPager = &resp + p.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.PrivateDNSZoneGroupsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + p.newListPager.remove(req) + return nil, 
&nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + p.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/privateendpoints_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/privateendpoints_server.go new file mode 100644 index 00000000000..cf00697f7a8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/privateendpoints_server.go @@ -0,0 +1,309 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// PrivateEndpointsServer is a fake server for instances of the armnetwork.PrivateEndpointsClient type. +type PrivateEndpointsServer struct { + // BeginCreateOrUpdate is the fake for method PrivateEndpointsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, privateEndpointName string, parameters armnetwork.PrivateEndpoint, options *armnetwork.PrivateEndpointsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.PrivateEndpointsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method PrivateEndpointsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, privateEndpointName string, options *armnetwork.PrivateEndpointsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.PrivateEndpointsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method PrivateEndpointsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, privateEndpointName string, options *armnetwork.PrivateEndpointsClientGetOptions) (resp azfake.Responder[armnetwork.PrivateEndpointsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method PrivateEndpointsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armnetwork.PrivateEndpointsClientListOptions) (resp azfake.PagerResponder[armnetwork.PrivateEndpointsClientListResponse]) + + // NewListBySubscriptionPager is the fake for method PrivateEndpointsClient.NewListBySubscriptionPager + // HTTP status codes to indicate success: http.StatusOK + NewListBySubscriptionPager func(options *armnetwork.PrivateEndpointsClientListBySubscriptionOptions) (resp 
azfake.PagerResponder[armnetwork.PrivateEndpointsClientListBySubscriptionResponse]) +} + +// NewPrivateEndpointsServerTransport creates a new instance of PrivateEndpointsServerTransport with the provided implementation. +// The returned PrivateEndpointsServerTransport instance is connected to an instance of armnetwork.PrivateEndpointsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewPrivateEndpointsServerTransport(srv *PrivateEndpointsServer) *PrivateEndpointsServerTransport { + return &PrivateEndpointsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.PrivateEndpointsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.PrivateEndpointsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.PrivateEndpointsClientListResponse]](), + newListBySubscriptionPager: newTracker[azfake.PagerResponder[armnetwork.PrivateEndpointsClientListBySubscriptionResponse]](), + } +} + +// PrivateEndpointsServerTransport connects instances of armnetwork.PrivateEndpointsClient to instances of PrivateEndpointsServer. +// Don't use this type directly, use NewPrivateEndpointsServerTransport instead. +type PrivateEndpointsServerTransport struct { + srv *PrivateEndpointsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.PrivateEndpointsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.PrivateEndpointsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.PrivateEndpointsClientListResponse]] + newListBySubscriptionPager *tracker[azfake.PagerResponder[armnetwork.PrivateEndpointsClientListBySubscriptionResponse]] +} + +// Do implements the policy.Transporter interface for PrivateEndpointsServerTransport. 
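Error paths go through azfake.ErrorResponder, which server.GetError converts into the error the client ultimately returns. A sketch assuming ErrorResponder.SetResponseError(status, code) and that the client surfaces it as an *azcore.ResponseError:

package main

import (
	"context"
	"errors"
	"fmt"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func main() {
	srv := fake.PrivateEndpointsServer{
		Get: func(ctx context.Context, resourceGroupName, privateEndpointName string,
			options *armnetwork.PrivateEndpointsClientGetOptions) (
			resp azfake.Responder[armnetwork.PrivateEndpointsClientGetResponse], errResp azfake.ErrorResponder) {
			// Simulate ARM returning 404 for this resource.
			errResp.SetResponseError(http.StatusNotFound, "ResourceNotFound")
			return
		},
	}

	clientFactory, err := armnetwork.NewClientFactory("subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transporter: fake.NewPrivateEndpointsServerTransport(&srv)},
	})
	if err != nil {
		log.Fatal(err)
	}

	_, err = clientFactory.NewPrivateEndpointsClient().Get(context.Background(), "rg", "pe", nil)
	var respErr *azcore.ResponseError
	if errors.As(err, &respErr) {
		fmt.Println(respErr.StatusCode, respErr.ErrorCode) // 404 ResourceNotFound
	}
}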
+func (p *PrivateEndpointsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "PrivateEndpointsClient.BeginCreateOrUpdate": + resp, err = p.dispatchBeginCreateOrUpdate(req) + case "PrivateEndpointsClient.BeginDelete": + resp, err = p.dispatchBeginDelete(req) + case "PrivateEndpointsClient.Get": + resp, err = p.dispatchGet(req) + case "PrivateEndpointsClient.NewListPager": + resp, err = p.dispatchNewListPager(req) + case "PrivateEndpointsClient.NewListBySubscriptionPager": + resp, err = p.dispatchNewListBySubscriptionPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (p *PrivateEndpointsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if p.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := p.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/privateEndpoints/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.PrivateEndpoint](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + privateEndpointNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("privateEndpointName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, privateEndpointNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + p.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + p.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + p.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (p *PrivateEndpointsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if p.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := p.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/privateEndpoints/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + privateEndpointNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("privateEndpointName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginDelete(req.Context(), resourceGroupNameParam, privateEndpointNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + p.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + p.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + p.beginDelete.remove(req) + } + + return resp, nil +} + +func (p *PrivateEndpointsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if p.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/privateEndpoints/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + privateEndpointNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("privateEndpointName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.PrivateEndpointsClientGetOptions + if expandParam != nil { + options = &armnetwork.PrivateEndpointsClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := p.srv.Get(req.Context(), resourceGroupNameParam, privateEndpointNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, 
&nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).PrivateEndpoint, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (p *PrivateEndpointsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if p.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := p.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/privateEndpoints` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := p.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + p.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.PrivateEndpointsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + p.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + p.newListPager.remove(req) + } + return resp, nil +} + +func (p *PrivateEndpointsServerTransport) dispatchNewListBySubscriptionPager(req *http.Request) (*http.Response, error) { + if p.srv.NewListBySubscriptionPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListBySubscriptionPager not implemented")} + } + newListBySubscriptionPager := p.newListBySubscriptionPager.get(req) + if newListBySubscriptionPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/privateEndpoints` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := p.srv.NewListBySubscriptionPager(nil) + newListBySubscriptionPager = &resp + p.newListBySubscriptionPager.add(req, newListBySubscriptionPager) + server.PagerResponderInjectNextLinks(newListBySubscriptionPager, req, func(page *armnetwork.PrivateEndpointsClientListBySubscriptionResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListBySubscriptionPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + p.newListBySubscriptionPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListBySubscriptionPager) { + p.newListBySubscriptionPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/privatelinkservices_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/privatelinkservices_server.go new file mode 100644 index 00000000000..49b8769df88 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/privatelinkservices_server.go @@ -0,0 +1,718 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// PrivateLinkServicesServer is a fake server for instances of the armnetwork.PrivateLinkServicesClient type. +type PrivateLinkServicesServer struct { + // BeginCheckPrivateLinkServiceVisibility is the fake for method PrivateLinkServicesClient.BeginCheckPrivateLinkServiceVisibility + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginCheckPrivateLinkServiceVisibility func(ctx context.Context, location string, parameters armnetwork.CheckPrivateLinkServiceVisibilityRequest, options *armnetwork.PrivateLinkServicesClientBeginCheckPrivateLinkServiceVisibilityOptions) (resp azfake.PollerResponder[armnetwork.PrivateLinkServicesClientCheckPrivateLinkServiceVisibilityResponse], errResp azfake.ErrorResponder) + + // BeginCheckPrivateLinkServiceVisibilityByResourceGroup is the fake for method PrivateLinkServicesClient.BeginCheckPrivateLinkServiceVisibilityByResourceGroup + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginCheckPrivateLinkServiceVisibilityByResourceGroup func(ctx context.Context, location string, resourceGroupName string, parameters armnetwork.CheckPrivateLinkServiceVisibilityRequest, options *armnetwork.PrivateLinkServicesClientBeginCheckPrivateLinkServiceVisibilityByResourceGroupOptions) (resp azfake.PollerResponder[armnetwork.PrivateLinkServicesClientCheckPrivateLinkServiceVisibilityByResourceGroupResponse], errResp azfake.ErrorResponder) + + // BeginCreateOrUpdate is the fake for method PrivateLinkServicesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, serviceName string, parameters armnetwork.PrivateLinkService, options *armnetwork.PrivateLinkServicesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.PrivateLinkServicesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method PrivateLinkServicesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete 
func(ctx context.Context, resourceGroupName string, serviceName string, options *armnetwork.PrivateLinkServicesClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.PrivateLinkServicesClientDeleteResponse], errResp azfake.ErrorResponder) + + // BeginDeletePrivateEndpointConnection is the fake for method PrivateLinkServicesClient.BeginDeletePrivateEndpointConnection + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDeletePrivateEndpointConnection func(ctx context.Context, resourceGroupName string, serviceName string, peConnectionName string, options *armnetwork.PrivateLinkServicesClientBeginDeletePrivateEndpointConnectionOptions) (resp azfake.PollerResponder[armnetwork.PrivateLinkServicesClientDeletePrivateEndpointConnectionResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method PrivateLinkServicesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, serviceName string, options *armnetwork.PrivateLinkServicesClientGetOptions) (resp azfake.Responder[armnetwork.PrivateLinkServicesClientGetResponse], errResp azfake.ErrorResponder) + + // GetPrivateEndpointConnection is the fake for method PrivateLinkServicesClient.GetPrivateEndpointConnection + // HTTP status codes to indicate success: http.StatusOK + GetPrivateEndpointConnection func(ctx context.Context, resourceGroupName string, serviceName string, peConnectionName string, options *armnetwork.PrivateLinkServicesClientGetPrivateEndpointConnectionOptions) (resp azfake.Responder[armnetwork.PrivateLinkServicesClientGetPrivateEndpointConnectionResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method PrivateLinkServicesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armnetwork.PrivateLinkServicesClientListOptions) (resp azfake.PagerResponder[armnetwork.PrivateLinkServicesClientListResponse]) + + // NewListAutoApprovedPrivateLinkServicesPager is the fake for method PrivateLinkServicesClient.NewListAutoApprovedPrivateLinkServicesPager + // HTTP status codes to indicate success: http.StatusOK + NewListAutoApprovedPrivateLinkServicesPager func(location string, options *armnetwork.PrivateLinkServicesClientListAutoApprovedPrivateLinkServicesOptions) (resp azfake.PagerResponder[armnetwork.PrivateLinkServicesClientListAutoApprovedPrivateLinkServicesResponse]) + + // NewListAutoApprovedPrivateLinkServicesByResourceGroupPager is the fake for method PrivateLinkServicesClient.NewListAutoApprovedPrivateLinkServicesByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListAutoApprovedPrivateLinkServicesByResourceGroupPager func(location string, resourceGroupName string, options *armnetwork.PrivateLinkServicesClientListAutoApprovedPrivateLinkServicesByResourceGroupOptions) (resp azfake.PagerResponder[armnetwork.PrivateLinkServicesClientListAutoApprovedPrivateLinkServicesByResourceGroupResponse]) + + // NewListBySubscriptionPager is the fake for method PrivateLinkServicesClient.NewListBySubscriptionPager + // HTTP status codes to indicate success: http.StatusOK + NewListBySubscriptionPager func(options *armnetwork.PrivateLinkServicesClientListBySubscriptionOptions) (resp azfake.PagerResponder[armnetwork.PrivateLinkServicesClientListBySubscriptionResponse]) + + // NewListPrivateEndpointConnectionsPager is the fake for method 
PrivateLinkServicesClient.NewListPrivateEndpointConnectionsPager + // HTTP status codes to indicate success: http.StatusOK + NewListPrivateEndpointConnectionsPager func(resourceGroupName string, serviceName string, options *armnetwork.PrivateLinkServicesClientListPrivateEndpointConnectionsOptions) (resp azfake.PagerResponder[armnetwork.PrivateLinkServicesClientListPrivateEndpointConnectionsResponse]) + + // UpdatePrivateEndpointConnection is the fake for method PrivateLinkServicesClient.UpdatePrivateEndpointConnection + // HTTP status codes to indicate success: http.StatusOK + UpdatePrivateEndpointConnection func(ctx context.Context, resourceGroupName string, serviceName string, peConnectionName string, parameters armnetwork.PrivateEndpointConnection, options *armnetwork.PrivateLinkServicesClientUpdatePrivateEndpointConnectionOptions) (resp azfake.Responder[armnetwork.PrivateLinkServicesClientUpdatePrivateEndpointConnectionResponse], errResp azfake.ErrorResponder) +} + +// NewPrivateLinkServicesServerTransport creates a new instance of PrivateLinkServicesServerTransport with the provided implementation. +// The returned PrivateLinkServicesServerTransport instance is connected to an instance of armnetwork.PrivateLinkServicesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewPrivateLinkServicesServerTransport(srv *PrivateLinkServicesServer) *PrivateLinkServicesServerTransport { + return &PrivateLinkServicesServerTransport{ + srv: srv, + beginCheckPrivateLinkServiceVisibility: newTracker[azfake.PollerResponder[armnetwork.PrivateLinkServicesClientCheckPrivateLinkServiceVisibilityResponse]](), + beginCheckPrivateLinkServiceVisibilityByResourceGroup: newTracker[azfake.PollerResponder[armnetwork.PrivateLinkServicesClientCheckPrivateLinkServiceVisibilityByResourceGroupResponse]](), + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.PrivateLinkServicesClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.PrivateLinkServicesClientDeleteResponse]](), + beginDeletePrivateEndpointConnection: newTracker[azfake.PollerResponder[armnetwork.PrivateLinkServicesClientDeletePrivateEndpointConnectionResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.PrivateLinkServicesClientListResponse]](), + newListAutoApprovedPrivateLinkServicesPager: newTracker[azfake.PagerResponder[armnetwork.PrivateLinkServicesClientListAutoApprovedPrivateLinkServicesResponse]](), + newListAutoApprovedPrivateLinkServicesByResourceGroupPager: newTracker[azfake.PagerResponder[armnetwork.PrivateLinkServicesClientListAutoApprovedPrivateLinkServicesByResourceGroupResponse]](), + newListBySubscriptionPager: newTracker[azfake.PagerResponder[armnetwork.PrivateLinkServicesClientListBySubscriptionResponse]](), + newListPrivateEndpointConnectionsPager: newTracker[azfake.PagerResponder[armnetwork.PrivateLinkServicesClientListPrivateEndpointConnectionsResponse]](), + } +} + +// PrivateLinkServicesServerTransport connects instances of armnetwork.PrivateLinkServicesClient to instances of PrivateLinkServicesServer. +// Don't use this type directly, use NewPrivateLinkServicesServerTransport instead. 
+type PrivateLinkServicesServerTransport struct { + srv *PrivateLinkServicesServer + beginCheckPrivateLinkServiceVisibility *tracker[azfake.PollerResponder[armnetwork.PrivateLinkServicesClientCheckPrivateLinkServiceVisibilityResponse]] + beginCheckPrivateLinkServiceVisibilityByResourceGroup *tracker[azfake.PollerResponder[armnetwork.PrivateLinkServicesClientCheckPrivateLinkServiceVisibilityByResourceGroupResponse]] + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.PrivateLinkServicesClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.PrivateLinkServicesClientDeleteResponse]] + beginDeletePrivateEndpointConnection *tracker[azfake.PollerResponder[armnetwork.PrivateLinkServicesClientDeletePrivateEndpointConnectionResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.PrivateLinkServicesClientListResponse]] + newListAutoApprovedPrivateLinkServicesPager *tracker[azfake.PagerResponder[armnetwork.PrivateLinkServicesClientListAutoApprovedPrivateLinkServicesResponse]] + newListAutoApprovedPrivateLinkServicesByResourceGroupPager *tracker[azfake.PagerResponder[armnetwork.PrivateLinkServicesClientListAutoApprovedPrivateLinkServicesByResourceGroupResponse]] + newListBySubscriptionPager *tracker[azfake.PagerResponder[armnetwork.PrivateLinkServicesClientListBySubscriptionResponse]] + newListPrivateEndpointConnectionsPager *tracker[azfake.PagerResponder[armnetwork.PrivateLinkServicesClientListPrivateEndpointConnectionsResponse]] +} + +// Do implements the policy.Transporter interface for PrivateLinkServicesServerTransport. +func (p *PrivateLinkServicesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "PrivateLinkServicesClient.BeginCheckPrivateLinkServiceVisibility": + resp, err = p.dispatchBeginCheckPrivateLinkServiceVisibility(req) + case "PrivateLinkServicesClient.BeginCheckPrivateLinkServiceVisibilityByResourceGroup": + resp, err = p.dispatchBeginCheckPrivateLinkServiceVisibilityByResourceGroup(req) + case "PrivateLinkServicesClient.BeginCreateOrUpdate": + resp, err = p.dispatchBeginCreateOrUpdate(req) + case "PrivateLinkServicesClient.BeginDelete": + resp, err = p.dispatchBeginDelete(req) + case "PrivateLinkServicesClient.BeginDeletePrivateEndpointConnection": + resp, err = p.dispatchBeginDeletePrivateEndpointConnection(req) + case "PrivateLinkServicesClient.Get": + resp, err = p.dispatchGet(req) + case "PrivateLinkServicesClient.GetPrivateEndpointConnection": + resp, err = p.dispatchGetPrivateEndpointConnection(req) + case "PrivateLinkServicesClient.NewListPager": + resp, err = p.dispatchNewListPager(req) + case "PrivateLinkServicesClient.NewListAutoApprovedPrivateLinkServicesPager": + resp, err = p.dispatchNewListAutoApprovedPrivateLinkServicesPager(req) + case "PrivateLinkServicesClient.NewListAutoApprovedPrivateLinkServicesByResourceGroupPager": + resp, err = p.dispatchNewListAutoApprovedPrivateLinkServicesByResourceGroupPager(req) + case "PrivateLinkServicesClient.NewListBySubscriptionPager": + resp, err = p.dispatchNewListBySubscriptionPager(req) + case "PrivateLinkServicesClient.NewListPrivateEndpointConnectionsPager": + resp, err = p.dispatchNewListPrivateEndpointConnectionsPager(req) + case 
"PrivateLinkServicesClient.UpdatePrivateEndpointConnection": + resp, err = p.dispatchUpdatePrivateEndpointConnection(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (p *PrivateLinkServicesServerTransport) dispatchBeginCheckPrivateLinkServiceVisibility(req *http.Request) (*http.Response, error) { + if p.srv.BeginCheckPrivateLinkServiceVisibility == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCheckPrivateLinkServiceVisibility not implemented")} + } + beginCheckPrivateLinkServiceVisibility := p.beginCheckPrivateLinkServiceVisibility.get(req) + if beginCheckPrivateLinkServiceVisibility == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/checkPrivateLinkServiceVisibility` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.CheckPrivateLinkServiceVisibilityRequest](req) + if err != nil { + return nil, err + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginCheckPrivateLinkServiceVisibility(req.Context(), locationParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCheckPrivateLinkServiceVisibility = &respr + p.beginCheckPrivateLinkServiceVisibility.add(req, beginCheckPrivateLinkServiceVisibility) + } + + resp, err := server.PollerResponderNext(beginCheckPrivateLinkServiceVisibility, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + p.beginCheckPrivateLinkServiceVisibility.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCheckPrivateLinkServiceVisibility) { + p.beginCheckPrivateLinkServiceVisibility.remove(req) + } + + return resp, nil +} + +func (p *PrivateLinkServicesServerTransport) dispatchBeginCheckPrivateLinkServiceVisibilityByResourceGroup(req *http.Request) (*http.Response, error) { + if p.srv.BeginCheckPrivateLinkServiceVisibilityByResourceGroup == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCheckPrivateLinkServiceVisibilityByResourceGroup not implemented")} + } + beginCheckPrivateLinkServiceVisibilityByResourceGroup := p.beginCheckPrivateLinkServiceVisibilityByResourceGroup.get(req) + if beginCheckPrivateLinkServiceVisibilityByResourceGroup == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/checkPrivateLinkServiceVisibility` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.CheckPrivateLinkServiceVisibilityRequest](req) + if err != nil { + return nil, err + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginCheckPrivateLinkServiceVisibilityByResourceGroup(req.Context(), locationParam, resourceGroupNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCheckPrivateLinkServiceVisibilityByResourceGroup = &respr + p.beginCheckPrivateLinkServiceVisibilityByResourceGroup.add(req, beginCheckPrivateLinkServiceVisibilityByResourceGroup) + } + + resp, err := server.PollerResponderNext(beginCheckPrivateLinkServiceVisibilityByResourceGroup, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + p.beginCheckPrivateLinkServiceVisibilityByResourceGroup.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCheckPrivateLinkServiceVisibilityByResourceGroup) { + p.beginCheckPrivateLinkServiceVisibilityByResourceGroup.remove(req) + } + + return resp, nil +} + +func (p *PrivateLinkServicesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if p.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := p.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/privateLinkServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.PrivateLinkService](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + serviceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("serviceName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, serviceNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + p.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + p.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + p.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (p *PrivateLinkServicesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if p.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := p.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/privateLinkServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + serviceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("serviceName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginDelete(req.Context(), resourceGroupNameParam, serviceNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + p.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + p.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + p.beginDelete.remove(req) + } + + return resp, nil +} + +func (p *PrivateLinkServicesServerTransport) dispatchBeginDeletePrivateEndpointConnection(req *http.Request) (*http.Response, error) { + if p.srv.BeginDeletePrivateEndpointConnection == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDeletePrivateEndpointConnection not implemented")} + } + beginDeletePrivateEndpointConnection := p.beginDeletePrivateEndpointConnection.get(req) + if beginDeletePrivateEndpointConnection == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/privateLinkServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/privateEndpointConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + serviceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("serviceName")]) + if err != nil { + return nil, err + } + peConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginDeletePrivateEndpointConnection(req.Context(), resourceGroupNameParam, serviceNameParam, peConnectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + 
beginDeletePrivateEndpointConnection = &respr + p.beginDeletePrivateEndpointConnection.add(req, beginDeletePrivateEndpointConnection) + } + + resp, err := server.PollerResponderNext(beginDeletePrivateEndpointConnection, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + p.beginDeletePrivateEndpointConnection.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDeletePrivateEndpointConnection) { + p.beginDeletePrivateEndpointConnection.remove(req) + } + + return resp, nil +} + +func (p *PrivateLinkServicesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if p.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/privateLinkServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + serviceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("serviceName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.PrivateLinkServicesClientGetOptions + if expandParam != nil { + options = &armnetwork.PrivateLinkServicesClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := p.srv.Get(req.Context(), resourceGroupNameParam, serviceNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).PrivateLinkService, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (p *PrivateLinkServicesServerTransport) dispatchGetPrivateEndpointConnection(req *http.Request) (*http.Response, error) { + if p.srv.GetPrivateEndpointConnection == nil { + return nil, &nonRetriableError{errors.New("fake for method GetPrivateEndpointConnection not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/privateLinkServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/privateEndpointConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + serviceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("serviceName")]) + if err != nil { + return nil, err + } + peConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peConnectionName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.PrivateLinkServicesClientGetPrivateEndpointConnectionOptions + if expandParam != nil { + options = &armnetwork.PrivateLinkServicesClientGetPrivateEndpointConnectionOptions{ + Expand: expandParam, + } + } + respr, errRespr := p.srv.GetPrivateEndpointConnection(req.Context(), resourceGroupNameParam, serviceNameParam, peConnectionNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).PrivateEndpointConnection, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (p *PrivateLinkServicesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if p.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := p.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/privateLinkServices` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := p.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + p.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.PrivateLinkServicesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + p.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + p.newListPager.remove(req) + } + return resp, nil +} + +func (p *PrivateLinkServicesServerTransport) dispatchNewListAutoApprovedPrivateLinkServicesPager(req *http.Request) (*http.Response, error) { + if p.srv.NewListAutoApprovedPrivateLinkServicesPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAutoApprovedPrivateLinkServicesPager not implemented")} + } + newListAutoApprovedPrivateLinkServicesPager := p.newListAutoApprovedPrivateLinkServicesPager.get(req) + if newListAutoApprovedPrivateLinkServicesPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/autoApprovedPrivateLinkServices` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + resp := p.srv.NewListAutoApprovedPrivateLinkServicesPager(locationParam, nil) + newListAutoApprovedPrivateLinkServicesPager = &resp + p.newListAutoApprovedPrivateLinkServicesPager.add(req, newListAutoApprovedPrivateLinkServicesPager) + server.PagerResponderInjectNextLinks(newListAutoApprovedPrivateLinkServicesPager, req, func(page *armnetwork.PrivateLinkServicesClientListAutoApprovedPrivateLinkServicesResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAutoApprovedPrivateLinkServicesPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + 
p.newListAutoApprovedPrivateLinkServicesPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAutoApprovedPrivateLinkServicesPager) { + p.newListAutoApprovedPrivateLinkServicesPager.remove(req) + } + return resp, nil +} + +func (p *PrivateLinkServicesServerTransport) dispatchNewListAutoApprovedPrivateLinkServicesByResourceGroupPager(req *http.Request) (*http.Response, error) { + if p.srv.NewListAutoApprovedPrivateLinkServicesByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAutoApprovedPrivateLinkServicesByResourceGroupPager not implemented")} + } + newListAutoApprovedPrivateLinkServicesByResourceGroupPager := p.newListAutoApprovedPrivateLinkServicesByResourceGroupPager.get(req) + if newListAutoApprovedPrivateLinkServicesByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/autoApprovedPrivateLinkServices` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := p.srv.NewListAutoApprovedPrivateLinkServicesByResourceGroupPager(locationParam, resourceGroupNameParam, nil) + newListAutoApprovedPrivateLinkServicesByResourceGroupPager = &resp + p.newListAutoApprovedPrivateLinkServicesByResourceGroupPager.add(req, newListAutoApprovedPrivateLinkServicesByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListAutoApprovedPrivateLinkServicesByResourceGroupPager, req, func(page *armnetwork.PrivateLinkServicesClientListAutoApprovedPrivateLinkServicesByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAutoApprovedPrivateLinkServicesByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + p.newListAutoApprovedPrivateLinkServicesByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAutoApprovedPrivateLinkServicesByResourceGroupPager) { + p.newListAutoApprovedPrivateLinkServicesByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (p *PrivateLinkServicesServerTransport) dispatchNewListBySubscriptionPager(req *http.Request) (*http.Response, error) { + if p.srv.NewListBySubscriptionPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListBySubscriptionPager not implemented")} + } + newListBySubscriptionPager := p.newListBySubscriptionPager.get(req) + if newListBySubscriptionPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/privateLinkServices` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := p.srv.NewListBySubscriptionPager(nil) + newListBySubscriptionPager = &resp + p.newListBySubscriptionPager.add(req, newListBySubscriptionPager) + server.PagerResponderInjectNextLinks(newListBySubscriptionPager, req, func(page *armnetwork.PrivateLinkServicesClientListBySubscriptionResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListBySubscriptionPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + p.newListBySubscriptionPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListBySubscriptionPager) { + p.newListBySubscriptionPager.remove(req) + } + return resp, nil +} + +func (p *PrivateLinkServicesServerTransport) dispatchNewListPrivateEndpointConnectionsPager(req *http.Request) (*http.Response, error) { + if p.srv.NewListPrivateEndpointConnectionsPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPrivateEndpointConnectionsPager not implemented")} + } + newListPrivateEndpointConnectionsPager := p.newListPrivateEndpointConnectionsPager.get(req) + if newListPrivateEndpointConnectionsPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/privateLinkServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/privateEndpointConnections` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + serviceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("serviceName")]) + if err != nil { + return nil, err + } + resp := p.srv.NewListPrivateEndpointConnectionsPager(resourceGroupNameParam, serviceNameParam, nil) + newListPrivateEndpointConnectionsPager = &resp + p.newListPrivateEndpointConnectionsPager.add(req, newListPrivateEndpointConnectionsPager) + server.PagerResponderInjectNextLinks(newListPrivateEndpointConnectionsPager, req, func(page *armnetwork.PrivateLinkServicesClientListPrivateEndpointConnectionsResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := 
server.PagerResponderNext(newListPrivateEndpointConnectionsPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + p.newListPrivateEndpointConnectionsPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPrivateEndpointConnectionsPager) { + p.newListPrivateEndpointConnectionsPager.remove(req) + } + return resp, nil +} + +func (p *PrivateLinkServicesServerTransport) dispatchUpdatePrivateEndpointConnection(req *http.Request) (*http.Response, error) { + if p.srv.UpdatePrivateEndpointConnection == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdatePrivateEndpointConnection not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/privateLinkServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/privateEndpointConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.PrivateEndpointConnection](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + serviceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("serviceName")]) + if err != nil { + return nil, err + } + peConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.UpdatePrivateEndpointConnection(req.Context(), resourceGroupNameParam, serviceNameParam, peConnectionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).PrivateEndpointConnection, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/profiles_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/profiles_server.go new file mode 100644 index 00000000000..c6a7e07dfcc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/profiles_server.go @@ -0,0 +1,339 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ProfilesServer is a fake server for instances of the armnetwork.ProfilesClient type. +type ProfilesServer struct { + // CreateOrUpdate is the fake for method ProfilesClient.CreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + CreateOrUpdate func(ctx context.Context, resourceGroupName string, networkProfileName string, parameters armnetwork.Profile, options *armnetwork.ProfilesClientCreateOrUpdateOptions) (resp azfake.Responder[armnetwork.ProfilesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method ProfilesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, networkProfileName string, options *armnetwork.ProfilesClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.ProfilesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method ProfilesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, networkProfileName string, options *armnetwork.ProfilesClientGetOptions) (resp azfake.Responder[armnetwork.ProfilesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method ProfilesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armnetwork.ProfilesClientListOptions) (resp azfake.PagerResponder[armnetwork.ProfilesClientListResponse]) + + // NewListAllPager is the fake for method ProfilesClient.NewListAllPager + // HTTP status codes to indicate success: http.StatusOK + NewListAllPager func(options *armnetwork.ProfilesClientListAllOptions) (resp azfake.PagerResponder[armnetwork.ProfilesClientListAllResponse]) + + // UpdateTags is the fake for method ProfilesClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, networkProfileName string, parameters armnetwork.TagsObject, options *armnetwork.ProfilesClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.ProfilesClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewProfilesServerTransport creates a new instance of ProfilesServerTransport with the provided implementation. +// The returned ProfilesServerTransport instance is connected to an instance of armnetwork.ProfilesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewProfilesServerTransport(srv *ProfilesServer) *ProfilesServerTransport { + return &ProfilesServerTransport{ + srv: srv, + beginDelete: newTracker[azfake.PollerResponder[armnetwork.ProfilesClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.ProfilesClientListResponse]](), + newListAllPager: newTracker[azfake.PagerResponder[armnetwork.ProfilesClientListAllResponse]](), + } +} + +// ProfilesServerTransport connects instances of armnetwork.ProfilesClient to instances of ProfilesServer. 
+// Don't use this type directly, use NewProfilesServerTransport instead. +type ProfilesServerTransport struct { + srv *ProfilesServer + beginDelete *tracker[azfake.PollerResponder[armnetwork.ProfilesClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.ProfilesClientListResponse]] + newListAllPager *tracker[azfake.PagerResponder[armnetwork.ProfilesClientListAllResponse]] +} + +// Do implements the policy.Transporter interface for ProfilesServerTransport. +func (p *ProfilesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ProfilesClient.CreateOrUpdate": + resp, err = p.dispatchCreateOrUpdate(req) + case "ProfilesClient.BeginDelete": + resp, err = p.dispatchBeginDelete(req) + case "ProfilesClient.Get": + resp, err = p.dispatchGet(req) + case "ProfilesClient.NewListPager": + resp, err = p.dispatchNewListPager(req) + case "ProfilesClient.NewListAllPager": + resp, err = p.dispatchNewListAllPager(req) + case "ProfilesClient.UpdateTags": + resp, err = p.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (p *ProfilesServerTransport) dispatchCreateOrUpdate(req *http.Request) (*http.Response, error) { + if p.srv.CreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method CreateOrUpdate not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkProfiles/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.Profile](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkProfileNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkProfileName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.CreateOrUpdate(req.Context(), resourceGroupNameParam, networkProfileNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusCreated}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Profile, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (p *ProfilesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if p.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := p.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkProfiles/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkProfileNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkProfileName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginDelete(req.Context(), resourceGroupNameParam, networkProfileNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + p.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + p.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + p.beginDelete.remove(req) + } + + return resp, nil +} + +func (p *ProfilesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if p.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkProfiles/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkProfileNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkProfileName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.ProfilesClientGetOptions + if expandParam != nil { + options = &armnetwork.ProfilesClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := p.srv.Get(req.Context(), resourceGroupNameParam, networkProfileNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Profile, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (p *ProfilesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if p.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := p.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkProfiles` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := p.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + p.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.ProfilesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + p.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + p.newListPager.remove(req) + } + return resp, nil +} + +func (p *ProfilesServerTransport) dispatchNewListAllPager(req *http.Request) (*http.Response, error) { + if p.srv.NewListAllPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAllPager not implemented")} + } + newListAllPager := p.newListAllPager.get(req) + if newListAllPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkProfiles` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := p.srv.NewListAllPager(nil) + newListAllPager = &resp + p.newListAllPager.add(req, newListAllPager) + server.PagerResponderInjectNextLinks(newListAllPager, req, func(page *armnetwork.ProfilesClientListAllResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAllPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + p.newListAllPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAllPager) { + p.newListAllPager.remove(req) + } + return resp, nil +} + +func (p *ProfilesServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if p.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkProfiles/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkProfileNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkProfileName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.UpdateTags(req.Context(), resourceGroupNameParam, networkProfileNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Profile, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/publicipaddresses_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/publicipaddresses_server.go new file mode 100644 index 00000000000..5df2c89fbf4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/publicipaddresses_server.go @@ -0,0 +1,758 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// PublicIPAddressesServer is a fake server for instances of the armnetwork.PublicIPAddressesClient type. +type PublicIPAddressesServer struct { + // BeginCreateOrUpdate is the fake for method PublicIPAddressesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, publicIPAddressName string, parameters armnetwork.PublicIPAddress, options *armnetwork.PublicIPAddressesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.PublicIPAddressesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDdosProtectionStatus is the fake for method PublicIPAddressesClient.BeginDdosProtectionStatus + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginDdosProtectionStatus func(ctx context.Context, resourceGroupName string, publicIPAddressName string, options *armnetwork.PublicIPAddressesClientBeginDdosProtectionStatusOptions) (resp azfake.PollerResponder[armnetwork.PublicIPAddressesClientDdosProtectionStatusResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method PublicIPAddressesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, publicIPAddressName string, options *armnetwork.PublicIPAddressesClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.PublicIPAddressesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method PublicIPAddressesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, publicIPAddressName string, options *armnetwork.PublicIPAddressesClientGetOptions) (resp azfake.Responder[armnetwork.PublicIPAddressesClientGetResponse], errResp azfake.ErrorResponder) + + // GetCloudServicePublicIPAddress is the fake for method PublicIPAddressesClient.GetCloudServicePublicIPAddress + // HTTP status codes to indicate 
success: http.StatusOK + GetCloudServicePublicIPAddress func(ctx context.Context, resourceGroupName string, cloudServiceName string, roleInstanceName string, networkInterfaceName string, ipConfigurationName string, publicIPAddressName string, options *armnetwork.PublicIPAddressesClientGetCloudServicePublicIPAddressOptions) (resp azfake.Responder[armnetwork.PublicIPAddressesClientGetCloudServicePublicIPAddressResponse], errResp azfake.ErrorResponder) + + // GetVirtualMachineScaleSetPublicIPAddress is the fake for method PublicIPAddressesClient.GetVirtualMachineScaleSetPublicIPAddress + // HTTP status codes to indicate success: http.StatusOK + GetVirtualMachineScaleSetPublicIPAddress func(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, ipConfigurationName string, publicIPAddressName string, options *armnetwork.PublicIPAddressesClientGetVirtualMachineScaleSetPublicIPAddressOptions) (resp azfake.Responder[armnetwork.PublicIPAddressesClientGetVirtualMachineScaleSetPublicIPAddressResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method PublicIPAddressesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armnetwork.PublicIPAddressesClientListOptions) (resp azfake.PagerResponder[armnetwork.PublicIPAddressesClientListResponse]) + + // NewListAllPager is the fake for method PublicIPAddressesClient.NewListAllPager + // HTTP status codes to indicate success: http.StatusOK + NewListAllPager func(options *armnetwork.PublicIPAddressesClientListAllOptions) (resp azfake.PagerResponder[armnetwork.PublicIPAddressesClientListAllResponse]) + + // NewListCloudServicePublicIPAddressesPager is the fake for method PublicIPAddressesClient.NewListCloudServicePublicIPAddressesPager + // HTTP status codes to indicate success: http.StatusOK + NewListCloudServicePublicIPAddressesPager func(resourceGroupName string, cloudServiceName string, options *armnetwork.PublicIPAddressesClientListCloudServicePublicIPAddressesOptions) (resp azfake.PagerResponder[armnetwork.PublicIPAddressesClientListCloudServicePublicIPAddressesResponse]) + + // NewListCloudServiceRoleInstancePublicIPAddressesPager is the fake for method PublicIPAddressesClient.NewListCloudServiceRoleInstancePublicIPAddressesPager + // HTTP status codes to indicate success: http.StatusOK + NewListCloudServiceRoleInstancePublicIPAddressesPager func(resourceGroupName string, cloudServiceName string, roleInstanceName string, networkInterfaceName string, ipConfigurationName string, options *armnetwork.PublicIPAddressesClientListCloudServiceRoleInstancePublicIPAddressesOptions) (resp azfake.PagerResponder[armnetwork.PublicIPAddressesClientListCloudServiceRoleInstancePublicIPAddressesResponse]) + + // NewListVirtualMachineScaleSetPublicIPAddressesPager is the fake for method PublicIPAddressesClient.NewListVirtualMachineScaleSetPublicIPAddressesPager + // HTTP status codes to indicate success: http.StatusOK + NewListVirtualMachineScaleSetPublicIPAddressesPager func(resourceGroupName string, virtualMachineScaleSetName string, options *armnetwork.PublicIPAddressesClientListVirtualMachineScaleSetPublicIPAddressesOptions) (resp azfake.PagerResponder[armnetwork.PublicIPAddressesClientListVirtualMachineScaleSetPublicIPAddressesResponse]) + + // NewListVirtualMachineScaleSetVMPublicIPAddressesPager is the fake for method 
PublicIPAddressesClient.NewListVirtualMachineScaleSetVMPublicIPAddressesPager + // HTTP status codes to indicate success: http.StatusOK + NewListVirtualMachineScaleSetVMPublicIPAddressesPager func(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, ipConfigurationName string, options *armnetwork.PublicIPAddressesClientListVirtualMachineScaleSetVMPublicIPAddressesOptions) (resp azfake.PagerResponder[armnetwork.PublicIPAddressesClientListVirtualMachineScaleSetVMPublicIPAddressesResponse]) + + // UpdateTags is the fake for method PublicIPAddressesClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, publicIPAddressName string, parameters armnetwork.TagsObject, options *armnetwork.PublicIPAddressesClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.PublicIPAddressesClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewPublicIPAddressesServerTransport creates a new instance of PublicIPAddressesServerTransport with the provided implementation. +// The returned PublicIPAddressesServerTransport instance is connected to an instance of armnetwork.PublicIPAddressesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewPublicIPAddressesServerTransport(srv *PublicIPAddressesServer) *PublicIPAddressesServerTransport { + return &PublicIPAddressesServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.PublicIPAddressesClientCreateOrUpdateResponse]](), + beginDdosProtectionStatus: newTracker[azfake.PollerResponder[armnetwork.PublicIPAddressesClientDdosProtectionStatusResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.PublicIPAddressesClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.PublicIPAddressesClientListResponse]](), + newListAllPager: newTracker[azfake.PagerResponder[armnetwork.PublicIPAddressesClientListAllResponse]](), + newListCloudServicePublicIPAddressesPager: newTracker[azfake.PagerResponder[armnetwork.PublicIPAddressesClientListCloudServicePublicIPAddressesResponse]](), + newListCloudServiceRoleInstancePublicIPAddressesPager: newTracker[azfake.PagerResponder[armnetwork.PublicIPAddressesClientListCloudServiceRoleInstancePublicIPAddressesResponse]](), + newListVirtualMachineScaleSetPublicIPAddressesPager: newTracker[azfake.PagerResponder[armnetwork.PublicIPAddressesClientListVirtualMachineScaleSetPublicIPAddressesResponse]](), + newListVirtualMachineScaleSetVMPublicIPAddressesPager: newTracker[azfake.PagerResponder[armnetwork.PublicIPAddressesClientListVirtualMachineScaleSetVMPublicIPAddressesResponse]](), + } +} + +// PublicIPAddressesServerTransport connects instances of armnetwork.PublicIPAddressesClient to instances of PublicIPAddressesServer. +// Don't use this type directly, use NewPublicIPAddressesServerTransport instead. 
+type PublicIPAddressesServerTransport struct { + srv *PublicIPAddressesServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.PublicIPAddressesClientCreateOrUpdateResponse]] + beginDdosProtectionStatus *tracker[azfake.PollerResponder[armnetwork.PublicIPAddressesClientDdosProtectionStatusResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.PublicIPAddressesClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.PublicIPAddressesClientListResponse]] + newListAllPager *tracker[azfake.PagerResponder[armnetwork.PublicIPAddressesClientListAllResponse]] + newListCloudServicePublicIPAddressesPager *tracker[azfake.PagerResponder[armnetwork.PublicIPAddressesClientListCloudServicePublicIPAddressesResponse]] + newListCloudServiceRoleInstancePublicIPAddressesPager *tracker[azfake.PagerResponder[armnetwork.PublicIPAddressesClientListCloudServiceRoleInstancePublicIPAddressesResponse]] + newListVirtualMachineScaleSetPublicIPAddressesPager *tracker[azfake.PagerResponder[armnetwork.PublicIPAddressesClientListVirtualMachineScaleSetPublicIPAddressesResponse]] + newListVirtualMachineScaleSetVMPublicIPAddressesPager *tracker[azfake.PagerResponder[armnetwork.PublicIPAddressesClientListVirtualMachineScaleSetVMPublicIPAddressesResponse]] +} + +// Do implements the policy.Transporter interface for PublicIPAddressesServerTransport. +func (p *PublicIPAddressesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "PublicIPAddressesClient.BeginCreateOrUpdate": + resp, err = p.dispatchBeginCreateOrUpdate(req) + case "PublicIPAddressesClient.BeginDdosProtectionStatus": + resp, err = p.dispatchBeginDdosProtectionStatus(req) + case "PublicIPAddressesClient.BeginDelete": + resp, err = p.dispatchBeginDelete(req) + case "PublicIPAddressesClient.Get": + resp, err = p.dispatchGet(req) + case "PublicIPAddressesClient.GetCloudServicePublicIPAddress": + resp, err = p.dispatchGetCloudServicePublicIPAddress(req) + case "PublicIPAddressesClient.GetVirtualMachineScaleSetPublicIPAddress": + resp, err = p.dispatchGetVirtualMachineScaleSetPublicIPAddress(req) + case "PublicIPAddressesClient.NewListPager": + resp, err = p.dispatchNewListPager(req) + case "PublicIPAddressesClient.NewListAllPager": + resp, err = p.dispatchNewListAllPager(req) + case "PublicIPAddressesClient.NewListCloudServicePublicIPAddressesPager": + resp, err = p.dispatchNewListCloudServicePublicIPAddressesPager(req) + case "PublicIPAddressesClient.NewListCloudServiceRoleInstancePublicIPAddressesPager": + resp, err = p.dispatchNewListCloudServiceRoleInstancePublicIPAddressesPager(req) + case "PublicIPAddressesClient.NewListVirtualMachineScaleSetPublicIPAddressesPager": + resp, err = p.dispatchNewListVirtualMachineScaleSetPublicIPAddressesPager(req) + case "PublicIPAddressesClient.NewListVirtualMachineScaleSetVMPublicIPAddressesPager": + resp, err = p.dispatchNewListVirtualMachineScaleSetVMPublicIPAddressesPager(req) + case "PublicIPAddressesClient.UpdateTags": + resp, err = p.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (p *PublicIPAddressesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) 
(*http.Response, error) { + if p.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := p.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/publicIPAddresses/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.PublicIPAddress](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + publicIPAddressNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publicIpAddressName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, publicIPAddressNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + p.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + p.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + p.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (p *PublicIPAddressesServerTransport) dispatchBeginDdosProtectionStatus(req *http.Request) (*http.Response, error) { + if p.srv.BeginDdosProtectionStatus == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDdosProtectionStatus not implemented")} + } + beginDdosProtectionStatus := p.beginDdosProtectionStatus.get(req) + if beginDdosProtectionStatus == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/publicIPAddresses/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ddosProtectionStatus` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + publicIPAddressNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publicIpAddressName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginDdosProtectionStatus(req.Context(), resourceGroupNameParam, publicIPAddressNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDdosProtectionStatus = &respr + p.beginDdosProtectionStatus.add(req, beginDdosProtectionStatus) + } + + resp, err := server.PollerResponderNext(beginDdosProtectionStatus, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + p.beginDdosProtectionStatus.remove(req) + return nil, 
&nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDdosProtectionStatus) { + p.beginDdosProtectionStatus.remove(req) + } + + return resp, nil +} + +func (p *PublicIPAddressesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if p.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := p.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/publicIPAddresses/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + publicIPAddressNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publicIpAddressName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginDelete(req.Context(), resourceGroupNameParam, publicIPAddressNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + p.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + p.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + p.beginDelete.remove(req) + } + + return resp, nil +} + +func (p *PublicIPAddressesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if p.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/publicIPAddresses/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + publicIPAddressNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publicIpAddressName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.PublicIPAddressesClientGetOptions + if expandParam != nil { + options = &armnetwork.PublicIPAddressesClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := p.srv.Get(req.Context(), resourceGroupNameParam, publicIPAddressNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).PublicIPAddress, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (p *PublicIPAddressesServerTransport) dispatchGetCloudServicePublicIPAddress(req *http.Request) (*http.Response, error) { + if p.srv.GetCloudServicePublicIPAddress == nil { + return nil, &nonRetriableError{errors.New("fake for method GetCloudServicePublicIPAddress not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/roleInstances/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkInterfaces/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ipconfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/publicipaddresses/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 7 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + roleInstanceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("roleInstanceName")]) + if err != nil { + return nil, err + } + networkInterfaceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkInterfaceName")]) + if err != nil { + return nil, err + } + ipConfigurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ipConfigurationName")]) + if err != nil { + return nil, err + } + publicIPAddressNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publicIpAddressName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.PublicIPAddressesClientGetCloudServicePublicIPAddressOptions + if expandParam != nil { + options = &armnetwork.PublicIPAddressesClientGetCloudServicePublicIPAddressOptions{ + Expand: expandParam, + } + } + respr, errRespr := p.srv.GetCloudServicePublicIPAddress(req.Context(), resourceGroupNameParam, cloudServiceNameParam, roleInstanceNameParam, networkInterfaceNameParam, ipConfigurationNameParam, publicIPAddressNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).PublicIPAddress, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (p *PublicIPAddressesServerTransport) dispatchGetVirtualMachineScaleSetPublicIPAddress(req *http.Request) (*http.Response, error) { + if p.srv.GetVirtualMachineScaleSetPublicIPAddress == nil { + return nil, &nonRetriableError{errors.New("fake for method GetVirtualMachineScaleSetPublicIPAddress not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkInterfaces/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ipconfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/publicipaddresses/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 7 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualMachineScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualMachineScaleSetName")]) + if err != nil { + return nil, err + } + virtualmachineIndexParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualmachineIndex")]) + if err != nil { + return nil, err + } + networkInterfaceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkInterfaceName")]) + if err != nil { + return nil, err + } + ipConfigurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ipConfigurationName")]) + if err != nil { + return nil, err + } + publicIPAddressNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publicIpAddressName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.PublicIPAddressesClientGetVirtualMachineScaleSetPublicIPAddressOptions + if expandParam != nil { + options = &armnetwork.PublicIPAddressesClientGetVirtualMachineScaleSetPublicIPAddressOptions{ + Expand: expandParam, + } + } + respr, errRespr := p.srv.GetVirtualMachineScaleSetPublicIPAddress(req.Context(), resourceGroupNameParam, virtualMachineScaleSetNameParam, virtualmachineIndexParam, networkInterfaceNameParam, ipConfigurationNameParam, publicIPAddressNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).PublicIPAddress, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (p *PublicIPAddressesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if p.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := p.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/publicIPAddresses` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := p.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + p.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.PublicIPAddressesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + p.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + p.newListPager.remove(req) + } + return resp, nil +} + +func (p *PublicIPAddressesServerTransport) dispatchNewListAllPager(req *http.Request) (*http.Response, error) { + if p.srv.NewListAllPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAllPager not implemented")} + } + newListAllPager := p.newListAllPager.get(req) + if newListAllPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/publicIPAddresses` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := p.srv.NewListAllPager(nil) + newListAllPager = &resp + p.newListAllPager.add(req, newListAllPager) + server.PagerResponderInjectNextLinks(newListAllPager, req, func(page *armnetwork.PublicIPAddressesClientListAllResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAllPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + p.newListAllPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAllPager) { + p.newListAllPager.remove(req) + } + return resp, nil +} + +func (p *PublicIPAddressesServerTransport) dispatchNewListCloudServicePublicIPAddressesPager(req *http.Request) (*http.Response, error) { + if p.srv.NewListCloudServicePublicIPAddressesPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListCloudServicePublicIPAddressesPager not implemented")} + } + newListCloudServicePublicIPAddressesPager := p.newListCloudServicePublicIPAddressesPager.get(req) + if newListCloudServicePublicIPAddressesPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/publicipaddresses` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + resp := p.srv.NewListCloudServicePublicIPAddressesPager(resourceGroupNameParam, cloudServiceNameParam, nil) + newListCloudServicePublicIPAddressesPager = &resp + p.newListCloudServicePublicIPAddressesPager.add(req, newListCloudServicePublicIPAddressesPager) + server.PagerResponderInjectNextLinks(newListCloudServicePublicIPAddressesPager, req, func(page *armnetwork.PublicIPAddressesClientListCloudServicePublicIPAddressesResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListCloudServicePublicIPAddressesPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + p.newListCloudServicePublicIPAddressesPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListCloudServicePublicIPAddressesPager) { + p.newListCloudServicePublicIPAddressesPager.remove(req) + } + return resp, nil +} + +func (p *PublicIPAddressesServerTransport) dispatchNewListCloudServiceRoleInstancePublicIPAddressesPager(req *http.Request) (*http.Response, error) { + if p.srv.NewListCloudServiceRoleInstancePublicIPAddressesPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListCloudServiceRoleInstancePublicIPAddressesPager not implemented")} + } + newListCloudServiceRoleInstancePublicIPAddressesPager := p.newListCloudServiceRoleInstancePublicIPAddressesPager.get(req) + if newListCloudServiceRoleInstancePublicIPAddressesPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/roleInstances/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkInterfaces/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ipconfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/publicipaddresses` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 6 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + cloudServiceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("cloudServiceName")]) + if err != nil { + return nil, err + } + roleInstanceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("roleInstanceName")]) + if err != nil { + return nil, err + } + networkInterfaceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkInterfaceName")]) + if err != nil { + return nil, err + } + ipConfigurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ipConfigurationName")]) + if err != nil { + return nil, err + } + resp := p.srv.NewListCloudServiceRoleInstancePublicIPAddressesPager(resourceGroupNameParam, cloudServiceNameParam, roleInstanceNameParam, networkInterfaceNameParam, ipConfigurationNameParam, nil) + newListCloudServiceRoleInstancePublicIPAddressesPager = &resp + p.newListCloudServiceRoleInstancePublicIPAddressesPager.add(req, newListCloudServiceRoleInstancePublicIPAddressesPager) + server.PagerResponderInjectNextLinks(newListCloudServiceRoleInstancePublicIPAddressesPager, req, func(page *armnetwork.PublicIPAddressesClientListCloudServiceRoleInstancePublicIPAddressesResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListCloudServiceRoleInstancePublicIPAddressesPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + p.newListCloudServiceRoleInstancePublicIPAddressesPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListCloudServiceRoleInstancePublicIPAddressesPager) { + p.newListCloudServiceRoleInstancePublicIPAddressesPager.remove(req) + } + return resp, nil +} + +func (p *PublicIPAddressesServerTransport) dispatchNewListVirtualMachineScaleSetPublicIPAddressesPager(req *http.Request) (*http.Response, error) { + if p.srv.NewListVirtualMachineScaleSetPublicIPAddressesPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListVirtualMachineScaleSetPublicIPAddressesPager not implemented")} + } + newListVirtualMachineScaleSetPublicIPAddressesPager := p.newListVirtualMachineScaleSetPublicIPAddressesPager.get(req) + if newListVirtualMachineScaleSetPublicIPAddressesPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/publicipaddresses` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualMachineScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualMachineScaleSetName")]) + if err != nil { + return nil, err + } + resp := p.srv.NewListVirtualMachineScaleSetPublicIPAddressesPager(resourceGroupNameParam, virtualMachineScaleSetNameParam, nil) + newListVirtualMachineScaleSetPublicIPAddressesPager = &resp + p.newListVirtualMachineScaleSetPublicIPAddressesPager.add(req, newListVirtualMachineScaleSetPublicIPAddressesPager) + server.PagerResponderInjectNextLinks(newListVirtualMachineScaleSetPublicIPAddressesPager, req, func(page *armnetwork.PublicIPAddressesClientListVirtualMachineScaleSetPublicIPAddressesResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListVirtualMachineScaleSetPublicIPAddressesPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + p.newListVirtualMachineScaleSetPublicIPAddressesPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListVirtualMachineScaleSetPublicIPAddressesPager) { + p.newListVirtualMachineScaleSetPublicIPAddressesPager.remove(req) + } + return resp, nil +} + +func (p *PublicIPAddressesServerTransport) dispatchNewListVirtualMachineScaleSetVMPublicIPAddressesPager(req *http.Request) (*http.Response, error) { + if p.srv.NewListVirtualMachineScaleSetVMPublicIPAddressesPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListVirtualMachineScaleSetVMPublicIPAddressesPager not implemented")} + } + newListVirtualMachineScaleSetVMPublicIPAddressesPager := p.newListVirtualMachineScaleSetVMPublicIPAddressesPager.get(req) + if newListVirtualMachineScaleSetVMPublicIPAddressesPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/virtualMachineScaleSets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualMachines/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkInterfaces/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ipconfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/publicipaddresses` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 6 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualMachineScaleSetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualMachineScaleSetName")]) + if err != nil { + return nil, err + } + virtualmachineIndexParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualmachineIndex")]) + if err != nil { + return nil, err + } + networkInterfaceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkInterfaceName")]) + if err != nil { + return nil, err + } + ipConfigurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ipConfigurationName")]) + if err != nil { + return nil, err + } + resp := p.srv.NewListVirtualMachineScaleSetVMPublicIPAddressesPager(resourceGroupNameParam, virtualMachineScaleSetNameParam, virtualmachineIndexParam, networkInterfaceNameParam, ipConfigurationNameParam, nil) + newListVirtualMachineScaleSetVMPublicIPAddressesPager = &resp + p.newListVirtualMachineScaleSetVMPublicIPAddressesPager.add(req, newListVirtualMachineScaleSetVMPublicIPAddressesPager) + server.PagerResponderInjectNextLinks(newListVirtualMachineScaleSetVMPublicIPAddressesPager, req, func(page *armnetwork.PublicIPAddressesClientListVirtualMachineScaleSetVMPublicIPAddressesResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListVirtualMachineScaleSetVMPublicIPAddressesPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + p.newListVirtualMachineScaleSetVMPublicIPAddressesPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListVirtualMachineScaleSetVMPublicIPAddressesPager) { + p.newListVirtualMachineScaleSetVMPublicIPAddressesPager.remove(req) + } + return resp, nil +} + +func (p *PublicIPAddressesServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if p.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/publicIPAddresses/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + publicIPAddressNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publicIpAddressName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.UpdateTags(req.Context(), resourceGroupNameParam, publicIPAddressNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).PublicIPAddress, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/publicipprefixes_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/publicipprefixes_server.go new file mode 100644 index 00000000000..e61dd837a9d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/publicipprefixes_server.go @@ -0,0 +1,352 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// PublicIPPrefixesServer is a fake server for instances of the armnetwork.PublicIPPrefixesClient type. 
+type PublicIPPrefixesServer struct { + // BeginCreateOrUpdate is the fake for method PublicIPPrefixesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, publicIPPrefixName string, parameters armnetwork.PublicIPPrefix, options *armnetwork.PublicIPPrefixesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.PublicIPPrefixesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method PublicIPPrefixesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, publicIPPrefixName string, options *armnetwork.PublicIPPrefixesClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.PublicIPPrefixesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method PublicIPPrefixesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, publicIPPrefixName string, options *armnetwork.PublicIPPrefixesClientGetOptions) (resp azfake.Responder[armnetwork.PublicIPPrefixesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method PublicIPPrefixesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armnetwork.PublicIPPrefixesClientListOptions) (resp azfake.PagerResponder[armnetwork.PublicIPPrefixesClientListResponse]) + + // NewListAllPager is the fake for method PublicIPPrefixesClient.NewListAllPager + // HTTP status codes to indicate success: http.StatusOK + NewListAllPager func(options *armnetwork.PublicIPPrefixesClientListAllOptions) (resp azfake.PagerResponder[armnetwork.PublicIPPrefixesClientListAllResponse]) + + // UpdateTags is the fake for method PublicIPPrefixesClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, publicIPPrefixName string, parameters armnetwork.TagsObject, options *armnetwork.PublicIPPrefixesClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.PublicIPPrefixesClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewPublicIPPrefixesServerTransport creates a new instance of PublicIPPrefixesServerTransport with the provided implementation. +// The returned PublicIPPrefixesServerTransport instance is connected to an instance of armnetwork.PublicIPPrefixesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewPublicIPPrefixesServerTransport(srv *PublicIPPrefixesServer) *PublicIPPrefixesServerTransport { + return &PublicIPPrefixesServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.PublicIPPrefixesClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.PublicIPPrefixesClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.PublicIPPrefixesClientListResponse]](), + newListAllPager: newTracker[azfake.PagerResponder[armnetwork.PublicIPPrefixesClientListAllResponse]](), + } +} + +// PublicIPPrefixesServerTransport connects instances of armnetwork.PublicIPPrefixesClient to instances of PublicIPPrefixesServer. +// Don't use this type directly, use NewPublicIPPrefixesServerTransport instead. 
+type PublicIPPrefixesServerTransport struct { + srv *PublicIPPrefixesServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.PublicIPPrefixesClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.PublicIPPrefixesClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.PublicIPPrefixesClientListResponse]] + newListAllPager *tracker[azfake.PagerResponder[armnetwork.PublicIPPrefixesClientListAllResponse]] +} + +// Do implements the policy.Transporter interface for PublicIPPrefixesServerTransport. +func (p *PublicIPPrefixesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "PublicIPPrefixesClient.BeginCreateOrUpdate": + resp, err = p.dispatchBeginCreateOrUpdate(req) + case "PublicIPPrefixesClient.BeginDelete": + resp, err = p.dispatchBeginDelete(req) + case "PublicIPPrefixesClient.Get": + resp, err = p.dispatchGet(req) + case "PublicIPPrefixesClient.NewListPager": + resp, err = p.dispatchNewListPager(req) + case "PublicIPPrefixesClient.NewListAllPager": + resp, err = p.dispatchNewListAllPager(req) + case "PublicIPPrefixesClient.UpdateTags": + resp, err = p.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (p *PublicIPPrefixesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if p.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := p.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/publicIPPrefixes/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.PublicIPPrefix](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + publicIPPrefixNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publicIpPrefixName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, publicIPPrefixNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + p.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + p.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + p.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (p *PublicIPPrefixesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if p.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := p.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/publicIPPrefixes/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + publicIPPrefixNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publicIpPrefixName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.BeginDelete(req.Context(), resourceGroupNameParam, publicIPPrefixNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + p.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + p.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + p.beginDelete.remove(req) + } + + return resp, nil +} + +func (p *PublicIPPrefixesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if p.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/publicIPPrefixes/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + publicIPPrefixNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publicIpPrefixName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.PublicIPPrefixesClientGetOptions + if expandParam != nil { + options = &armnetwork.PublicIPPrefixesClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := p.srv.Get(req.Context(), resourceGroupNameParam, publicIPPrefixNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, 
&nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).PublicIPPrefix, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (p *PublicIPPrefixesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if p.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := p.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/publicIPPrefixes` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := p.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + p.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.PublicIPPrefixesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + p.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + p.newListPager.remove(req) + } + return resp, nil +} + +func (p *PublicIPPrefixesServerTransport) dispatchNewListAllPager(req *http.Request) (*http.Response, error) { + if p.srv.NewListAllPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAllPager not implemented")} + } + newListAllPager := p.newListAllPager.get(req) + if newListAllPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/publicIPPrefixes` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := p.srv.NewListAllPager(nil) + newListAllPager = &resp + p.newListAllPager.add(req, newListAllPager) + server.PagerResponderInjectNextLinks(newListAllPager, req, func(page *armnetwork.PublicIPPrefixesClientListAllResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAllPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + p.newListAllPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAllPager) { + p.newListAllPager.remove(req) + } + return resp, nil +} + +func (p *PublicIPPrefixesServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if p.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/publicIPPrefixes/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + publicIPPrefixNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("publicIpPrefixName")]) + if err != nil { + return nil, err + } + respr, errRespr := p.srv.UpdateTags(req.Context(), resourceGroupNameParam, publicIPPrefixNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).PublicIPPrefix, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/resourcenavigationlinks_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/resourcenavigationlinks_server.go new file mode 100644 index 00000000000..f18addf67fc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/resourcenavigationlinks_server.go @@ -0,0 +1,104 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ResourceNavigationLinksServer is a fake server for instances of the armnetwork.ResourceNavigationLinksClient type. 
+type ResourceNavigationLinksServer struct { + // List is the fake for method ResourceNavigationLinksClient.List + // HTTP status codes to indicate success: http.StatusOK + List func(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, options *armnetwork.ResourceNavigationLinksClientListOptions) (resp azfake.Responder[armnetwork.ResourceNavigationLinksClientListResponse], errResp azfake.ErrorResponder) +} + +// NewResourceNavigationLinksServerTransport creates a new instance of ResourceNavigationLinksServerTransport with the provided implementation. +// The returned ResourceNavigationLinksServerTransport instance is connected to an instance of armnetwork.ResourceNavigationLinksClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewResourceNavigationLinksServerTransport(srv *ResourceNavigationLinksServer) *ResourceNavigationLinksServerTransport { + return &ResourceNavigationLinksServerTransport{srv: srv} +} + +// ResourceNavigationLinksServerTransport connects instances of armnetwork.ResourceNavigationLinksClient to instances of ResourceNavigationLinksServer. +// Don't use this type directly, use NewResourceNavigationLinksServerTransport instead. +type ResourceNavigationLinksServerTransport struct { + srv *ResourceNavigationLinksServer +} + +// Do implements the policy.Transporter interface for ResourceNavigationLinksServerTransport. +func (r *ResourceNavigationLinksServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ResourceNavigationLinksClient.List": + resp, err = r.dispatchList(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (r *ResourceNavigationLinksServerTransport) dispatchList(req *http.Request) (*http.Response, error) { + if r.srv.List == nil { + return nil, &nonRetriableError{errors.New("fake for method List not implemented")} + } + const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworks/(?P<virtualNetworkName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/subnets/(?P<subnetName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ResourceNavigationLinks` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkName")]) + if err != nil { + return nil, err + } + subnetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("subnetName")]) + if err != nil { + return nil, err + } + respr, errRespr := r.srv.List(req.Context(), resourceGroupNameParam, virtualNetworkNameParam, subnetNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ResourceNavigationLinksListResult, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/routefilterrules_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/routefilterrules_server.go new file mode 100644 index 00000000000..942b320459f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/routefilterrules_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// RouteFilterRulesServer is a fake server for instances of the armnetwork.RouteFilterRulesClient type. +type RouteFilterRulesServer struct { + // BeginCreateOrUpdate is the fake for method RouteFilterRulesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, routeFilterName string, ruleName string, routeFilterRuleParameters armnetwork.RouteFilterRule, options *armnetwork.RouteFilterRulesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.RouteFilterRulesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method RouteFilterRulesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, routeFilterName string, ruleName string, options *armnetwork.RouteFilterRulesClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.RouteFilterRulesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method RouteFilterRulesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, routeFilterName string, ruleName string, options *armnetwork.RouteFilterRulesClientGetOptions) (resp azfake.Responder[armnetwork.RouteFilterRulesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListByRouteFilterPager is the fake for method RouteFilterRulesClient.NewListByRouteFilterPager + // HTTP status codes to indicate success: http.StatusOK + NewListByRouteFilterPager func(resourceGroupName string, routeFilterName string, options *armnetwork.RouteFilterRulesClientListByRouteFilterOptions) (resp azfake.PagerResponder[armnetwork.RouteFilterRulesClientListByRouteFilterResponse]) +} + +// NewRouteFilterRulesServerTransport creates a new instance of RouteFilterRulesServerTransport with the provided implementation. 
+// The returned RouteFilterRulesServerTransport instance is connected to an instance of armnetwork.RouteFilterRulesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewRouteFilterRulesServerTransport(srv *RouteFilterRulesServer) *RouteFilterRulesServerTransport { + return &RouteFilterRulesServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.RouteFilterRulesClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.RouteFilterRulesClientDeleteResponse]](), + newListByRouteFilterPager: newTracker[azfake.PagerResponder[armnetwork.RouteFilterRulesClientListByRouteFilterResponse]](), + } +} + +// RouteFilterRulesServerTransport connects instances of armnetwork.RouteFilterRulesClient to instances of RouteFilterRulesServer. +// Don't use this type directly, use NewRouteFilterRulesServerTransport instead. +type RouteFilterRulesServerTransport struct { + srv *RouteFilterRulesServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.RouteFilterRulesClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.RouteFilterRulesClientDeleteResponse]] + newListByRouteFilterPager *tracker[azfake.PagerResponder[armnetwork.RouteFilterRulesClientListByRouteFilterResponse]] +} + +// Do implements the policy.Transporter interface for RouteFilterRulesServerTransport. +func (r *RouteFilterRulesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "RouteFilterRulesClient.BeginCreateOrUpdate": + resp, err = r.dispatchBeginCreateOrUpdate(req) + case "RouteFilterRulesClient.BeginDelete": + resp, err = r.dispatchBeginDelete(req) + case "RouteFilterRulesClient.Get": + resp, err = r.dispatchGet(req) + case "RouteFilterRulesClient.NewListByRouteFilterPager": + resp, err = r.dispatchNewListByRouteFilterPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (r *RouteFilterRulesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if r.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := r.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/routeFilters/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/routeFilterRules/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.RouteFilterRule](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + routeFilterNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeFilterName")]) + if err != nil { + return nil, err + } + ruleNameParam, err := 
url.PathUnescape(matches[regex.SubexpIndex("ruleName")]) + if err != nil { + return nil, err + } + respr, errRespr := r.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, routeFilterNameParam, ruleNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + r.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + r.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + r.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (r *RouteFilterRulesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if r.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := r.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/routeFilters/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/routeFilterRules/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + routeFilterNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeFilterName")]) + if err != nil { + return nil, err + } + ruleNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ruleName")]) + if err != nil { + return nil, err + } + respr, errRespr := r.srv.BeginDelete(req.Context(), resourceGroupNameParam, routeFilterNameParam, ruleNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + r.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + r.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + r.beginDelete.remove(req) + } + + return resp, nil +} + +func (r *RouteFilterRulesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if r.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/routeFilters/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/routeFilterRules/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + routeFilterNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeFilterName")]) + if err != nil { + return nil, err + } + ruleNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ruleName")]) + if err != nil { + return nil, err + } + respr, errRespr := r.srv.Get(req.Context(), resourceGroupNameParam, routeFilterNameParam, ruleNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).RouteFilterRule, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (r *RouteFilterRulesServerTransport) dispatchNewListByRouteFilterPager(req *http.Request) (*http.Response, error) { + if r.srv.NewListByRouteFilterPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByRouteFilterPager not implemented")} + } + newListByRouteFilterPager := r.newListByRouteFilterPager.get(req) + if newListByRouteFilterPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/routeFilters/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/routeFilterRules` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + routeFilterNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeFilterName")]) + if err != nil { + return nil, err + } + resp := r.srv.NewListByRouteFilterPager(resourceGroupNameParam, routeFilterNameParam, nil) + newListByRouteFilterPager = &resp + r.newListByRouteFilterPager.add(req, newListByRouteFilterPager) + server.PagerResponderInjectNextLinks(newListByRouteFilterPager, req, func(page *armnetwork.RouteFilterRulesClientListByRouteFilterResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByRouteFilterPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, 
resp.StatusCode) { + r.newListByRouteFilterPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByRouteFilterPager) { + r.newListByRouteFilterPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/routefilters_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/routefilters_server.go new file mode 100644 index 00000000000..7f73ca50dbd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/routefilters_server.go @@ -0,0 +1,352 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// RouteFiltersServer is a fake server for instances of the armnetwork.RouteFiltersClient type. +type RouteFiltersServer struct { + // BeginCreateOrUpdate is the fake for method RouteFiltersClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, routeFilterName string, routeFilterParameters armnetwork.RouteFilter, options *armnetwork.RouteFiltersClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.RouteFiltersClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method RouteFiltersClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, routeFilterName string, options *armnetwork.RouteFiltersClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.RouteFiltersClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method RouteFiltersClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, routeFilterName string, options *armnetwork.RouteFiltersClientGetOptions) (resp azfake.Responder[armnetwork.RouteFiltersClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method RouteFiltersClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armnetwork.RouteFiltersClientListOptions) (resp azfake.PagerResponder[armnetwork.RouteFiltersClientListResponse]) + + // NewListByResourceGroupPager is the fake for method RouteFiltersClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armnetwork.RouteFiltersClientListByResourceGroupOptions) (resp 
azfake.PagerResponder[armnetwork.RouteFiltersClientListByResourceGroupResponse]) + + // UpdateTags is the fake for method RouteFiltersClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, routeFilterName string, parameters armnetwork.TagsObject, options *armnetwork.RouteFiltersClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.RouteFiltersClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewRouteFiltersServerTransport creates a new instance of RouteFiltersServerTransport with the provided implementation. +// The returned RouteFiltersServerTransport instance is connected to an instance of armnetwork.RouteFiltersClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewRouteFiltersServerTransport(srv *RouteFiltersServer) *RouteFiltersServerTransport { + return &RouteFiltersServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.RouteFiltersClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.RouteFiltersClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.RouteFiltersClientListResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armnetwork.RouteFiltersClientListByResourceGroupResponse]](), + } +} + +// RouteFiltersServerTransport connects instances of armnetwork.RouteFiltersClient to instances of RouteFiltersServer. +// Don't use this type directly, use NewRouteFiltersServerTransport instead. +type RouteFiltersServerTransport struct { + srv *RouteFiltersServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.RouteFiltersClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.RouteFiltersClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.RouteFiltersClientListResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armnetwork.RouteFiltersClientListByResourceGroupResponse]] +} + +// Do implements the policy.Transporter interface for RouteFiltersServerTransport. 
+func (r *RouteFiltersServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "RouteFiltersClient.BeginCreateOrUpdate": + resp, err = r.dispatchBeginCreateOrUpdate(req) + case "RouteFiltersClient.BeginDelete": + resp, err = r.dispatchBeginDelete(req) + case "RouteFiltersClient.Get": + resp, err = r.dispatchGet(req) + case "RouteFiltersClient.NewListPager": + resp, err = r.dispatchNewListPager(req) + case "RouteFiltersClient.NewListByResourceGroupPager": + resp, err = r.dispatchNewListByResourceGroupPager(req) + case "RouteFiltersClient.UpdateTags": + resp, err = r.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (r *RouteFiltersServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if r.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := r.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/routeFilters/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.RouteFilter](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + routeFilterNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeFilterName")]) + if err != nil { + return nil, err + } + respr, errRespr := r.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, routeFilterNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + r.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + r.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + r.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (r *RouteFiltersServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if r.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := r.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/routeFilters/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + routeFilterNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeFilterName")]) + if err != nil { + return nil, err + } + respr, errRespr := r.srv.BeginDelete(req.Context(), resourceGroupNameParam, routeFilterNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + r.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + r.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + r.beginDelete.remove(req) + } + + return resp, nil +} + +func (r *RouteFiltersServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if r.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/routeFilters/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + routeFilterNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeFilterName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.RouteFiltersClientGetOptions + if expandParam != nil { + options = &armnetwork.RouteFiltersClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := r.srv.Get(req.Context(), resourceGroupNameParam, routeFilterNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status 
code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).RouteFilter, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (r *RouteFiltersServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if r.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := r.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/routeFilters` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := r.srv.NewListPager(nil) + newListPager = &resp + r.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.RouteFiltersClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + r.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + r.newListPager.remove(req) + } + return resp, nil +} + +func (r *RouteFiltersServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if r.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := r.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/routeFilters` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := r.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + r.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armnetwork.RouteFiltersClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + r.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + r.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (r *RouteFiltersServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if r.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/routeFilters/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + routeFilterNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeFilterName")]) + if err != nil { + return nil, err + } + respr, errRespr := r.srv.UpdateTags(req.Context(), resourceGroupNameParam, routeFilterNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).RouteFilter, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/routemaps_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/routemaps_server.go new file mode 100644 index 00000000000..6030cc4f6c8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/routemaps_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// RouteMapsServer is a fake server for instances of the armnetwork.RouteMapsClient type. 
+type RouteMapsServer struct { + // BeginCreateOrUpdate is the fake for method RouteMapsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, virtualHubName string, routeMapName string, routeMapParameters armnetwork.RouteMap, options *armnetwork.RouteMapsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.RouteMapsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method RouteMapsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, virtualHubName string, routeMapName string, options *armnetwork.RouteMapsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.RouteMapsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method RouteMapsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, virtualHubName string, routeMapName string, options *armnetwork.RouteMapsClientGetOptions) (resp azfake.Responder[armnetwork.RouteMapsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method RouteMapsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, virtualHubName string, options *armnetwork.RouteMapsClientListOptions) (resp azfake.PagerResponder[armnetwork.RouteMapsClientListResponse]) +} + +// NewRouteMapsServerTransport creates a new instance of RouteMapsServerTransport with the provided implementation. +// The returned RouteMapsServerTransport instance is connected to an instance of armnetwork.RouteMapsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewRouteMapsServerTransport(srv *RouteMapsServer) *RouteMapsServerTransport { + return &RouteMapsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.RouteMapsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.RouteMapsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.RouteMapsClientListResponse]](), + } +} + +// RouteMapsServerTransport connects instances of armnetwork.RouteMapsClient to instances of RouteMapsServer. +// Don't use this type directly, use NewRouteMapsServerTransport instead. +type RouteMapsServerTransport struct { + srv *RouteMapsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.RouteMapsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.RouteMapsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.RouteMapsClientListResponse]] +} + +// Do implements the policy.Transporter interface for RouteMapsServerTransport. 
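The Begin* fakes above return an azfake.PollerResponder rather than a single response: the generated dispatcher keeps it in a per-request tracker and replays one polling response per round trip until the terminal result is consumed (PollerResponderNext / PollerResponderMore). The following is a hypothetical fragment, assuming the PollerResponder helpers AddNonTerminalResponse and SetTerminalResponse plus the imports these generated files already use and azcore/to; it would slot into a test wired like the RoutesServer sketch further below.

srv := fake.RouteMapsServer{
    BeginCreateOrUpdate: func(ctx context.Context, resourceGroupName, virtualHubName, routeMapName string, routeMapParameters armnetwork.RouteMap, options *armnetwork.RouteMapsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.RouteMapsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) {
        // One intermediate poll (HTTP 200), then the terminal payload.
        resp.AddNonTerminalResponse(http.StatusOK, nil)
        resp.SetTerminalResponse(http.StatusOK, armnetwork.RouteMapsClientCreateOrUpdateResponse{
            RouteMap: armnetwork.RouteMap{Name: to.Ptr(routeMapName)},
        }, nil)
        return
    },
}
_ = fake.NewRouteMapsServerTransport(&srv) // pass as the client's Transporter, as in the Routes sketch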
+func (r *RouteMapsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "RouteMapsClient.BeginCreateOrUpdate": + resp, err = r.dispatchBeginCreateOrUpdate(req) + case "RouteMapsClient.BeginDelete": + resp, err = r.dispatchBeginDelete(req) + case "RouteMapsClient.Get": + resp, err = r.dispatchGet(req) + case "RouteMapsClient.NewListPager": + resp, err = r.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (r *RouteMapsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if r.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := r.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/routeMaps/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.RouteMap](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + routeMapNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeMapName")]) + if err != nil { + return nil, err + } + respr, errRespr := r.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, virtualHubNameParam, routeMapNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + r.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + r.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + r.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (r *RouteMapsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if r.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := r.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/routeMaps/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + routeMapNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeMapName")]) + if err != nil { + return nil, err + } + respr, errRespr := r.srv.BeginDelete(req.Context(), resourceGroupNameParam, virtualHubNameParam, routeMapNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + r.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + r.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + r.beginDelete.remove(req) + } + + return resp, nil +} + +func (r *RouteMapsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if r.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/routeMaps/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + routeMapNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeMapName")]) + if err != nil { + return nil, err + } + respr, errRespr := r.srv.Get(req.Context(), resourceGroupNameParam, virtualHubNameParam, routeMapNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).RouteMap, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (r *RouteMapsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if r.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := r.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/routeMaps` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + resp := r.srv.NewListPager(resourceGroupNameParam, virtualHubNameParam, nil) + newListPager = &resp + r.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.RouteMapsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + r.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + r.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/routes_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/routes_server.go new file mode 100644 index 00000000000..b26835a4381 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/routes_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// RoutesServer is a fake server for instances of the armnetwork.RoutesClient type. +type RoutesServer struct { + // BeginCreateOrUpdate is the fake for method RoutesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, routeParameters armnetwork.Route, options *armnetwork.RoutesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.RoutesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method RoutesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, options *armnetwork.RoutesClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.RoutesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method RoutesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, options *armnetwork.RoutesClientGetOptions) (resp azfake.Responder[armnetwork.RoutesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method RoutesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, routeTableName string, options *armnetwork.RoutesClientListOptions) (resp azfake.PagerResponder[armnetwork.RoutesClientListResponse]) +} + +// NewRoutesServerTransport creates a new instance of RoutesServerTransport with the provided implementation. +// The returned RoutesServerTransport instance is connected to an instance of armnetwork.RoutesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
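Every fake in this package is wired the way the comment above describes: a test fills in only the methods it needs on the *Server struct, wraps it in the matching *ServerTransport, and hands that transport to the real client through azcore.ClientOptions.Transporter. Here is a minimal test sketch for the RoutesServer fake, not taken from this diff; it assumes the azfake responder API (Responder.SetResponse, azfake.TokenCredential) and the armnetwork/v4 import paths shown in these files, and the test name, subscription ID, and resource names are placeholders.

package armnetwork_test

import (
    "context"
    "net/http"
    "testing"

    "github.com/Azure/azure-sdk-for-go/sdk/azcore"
    "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
    azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
    "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func TestRoutesClientGetAgainstFake(t *testing.T) {
    // Implement only the operation the test exercises; anything left nil is
    // answered by the generated dispatcher with a "not implemented" error.
    srv := fake.RoutesServer{
        Get: func(ctx context.Context, resourceGroupName, routeTableName, routeName string, options *armnetwork.RoutesClientGetOptions) (resp azfake.Responder[armnetwork.RoutesClientGetResponse], errResp azfake.ErrorResponder) {
            resp.SetResponse(http.StatusOK, armnetwork.RoutesClientGetResponse{
                Route: armnetwork.Route{Name: to.Ptr(routeName)},
            }, nil)
            return
        },
    }

    // Route all of the client's HTTP traffic to the fake transport.
    client, err := armnetwork.NewRoutesClient("00000000-0000-0000-0000-000000000000",
        &azfake.TokenCredential{},
        &arm.ClientOptions{ClientOptions: azcore.ClientOptions{
            Transporter: fake.NewRoutesServerTransport(&srv),
        }})
    if err != nil {
        t.Fatal(err)
    }

    got, err := client.Get(context.Background(), "test-rg", "test-rt", "route-1", nil)
    if err != nil {
        t.Fatal(err)
    }
    if *got.Name != "route-1" {
        t.Fatalf("unexpected route name %q", *got.Name)
    }
}

Because the generated Do method dispatches on the API name the SDK pipeline stores under runtime.CtxAPINameKey, the fake only handles requests issued through the corresponding client.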
+func NewRoutesServerTransport(srv *RoutesServer) *RoutesServerTransport { + return &RoutesServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.RoutesClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.RoutesClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.RoutesClientListResponse]](), + } +} + +// RoutesServerTransport connects instances of armnetwork.RoutesClient to instances of RoutesServer. +// Don't use this type directly, use NewRoutesServerTransport instead. +type RoutesServerTransport struct { + srv *RoutesServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.RoutesClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.RoutesClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.RoutesClientListResponse]] +} + +// Do implements the policy.Transporter interface for RoutesServerTransport. +func (r *RoutesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "RoutesClient.BeginCreateOrUpdate": + resp, err = r.dispatchBeginCreateOrUpdate(req) + case "RoutesClient.BeginDelete": + resp, err = r.dispatchBeginDelete(req) + case "RoutesClient.Get": + resp, err = r.dispatchGet(req) + case "RoutesClient.NewListPager": + resp, err = r.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (r *RoutesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if r.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := r.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/routeTables/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/routes/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.Route](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + routeTableNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeTableName")]) + if err != nil { + return nil, err + } + routeNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeName")]) + if err != nil { + return nil, err + } + respr, errRespr := r.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, routeTableNameParam, routeNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + r.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if 
!contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + r.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + r.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (r *RoutesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if r.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := r.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/routeTables/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/routes/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + routeTableNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeTableName")]) + if err != nil { + return nil, err + } + routeNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeName")]) + if err != nil { + return nil, err + } + respr, errRespr := r.srv.BeginDelete(req.Context(), resourceGroupNameParam, routeTableNameParam, routeNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + r.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + r.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + r.beginDelete.remove(req) + } + + return resp, nil +} + +func (r *RoutesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if r.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/routeTables/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/routes/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + routeTableNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeTableName")]) + if err != nil { + return nil, err + } + routeNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeName")]) + if err != nil { + return nil, err + } + respr, errRespr := r.srv.Get(req.Context(), resourceGroupNameParam, routeTableNameParam, routeNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Route, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (r *RoutesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if r.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := r.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/routeTables/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/routes` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + routeTableNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeTableName")]) + if err != nil { + return nil, err + } + resp := r.srv.NewListPager(resourceGroupNameParam, routeTableNameParam, nil) + newListPager = &resp + r.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.RoutesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + r.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + r.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/routetables_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/routetables_server.go new file mode 100644 index 00000000000..ef2d864338a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/routetables_server.go @@ -0,0 +1,352 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// RouteTablesServer is a fake server for instances of the armnetwork.RouteTablesClient type. +type RouteTablesServer struct { + // BeginCreateOrUpdate is the fake for method RouteTablesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, routeTableName string, parameters armnetwork.RouteTable, options *armnetwork.RouteTablesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.RouteTablesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method RouteTablesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, routeTableName string, options *armnetwork.RouteTablesClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.RouteTablesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method RouteTablesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, routeTableName string, options *armnetwork.RouteTablesClientGetOptions) (resp azfake.Responder[armnetwork.RouteTablesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method RouteTablesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armnetwork.RouteTablesClientListOptions) (resp azfake.PagerResponder[armnetwork.RouteTablesClientListResponse]) + + // NewListAllPager is the fake for method RouteTablesClient.NewListAllPager + // HTTP status codes to indicate success: http.StatusOK + NewListAllPager func(options *armnetwork.RouteTablesClientListAllOptions) (resp azfake.PagerResponder[armnetwork.RouteTablesClientListAllResponse]) + + // UpdateTags is the fake for method RouteTablesClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, routeTableName string, parameters 
armnetwork.TagsObject, options *armnetwork.RouteTablesClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.RouteTablesClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewRouteTablesServerTransport creates a new instance of RouteTablesServerTransport with the provided implementation. +// The returned RouteTablesServerTransport instance is connected to an instance of armnetwork.RouteTablesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewRouteTablesServerTransport(srv *RouteTablesServer) *RouteTablesServerTransport { + return &RouteTablesServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.RouteTablesClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.RouteTablesClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.RouteTablesClientListResponse]](), + newListAllPager: newTracker[azfake.PagerResponder[armnetwork.RouteTablesClientListAllResponse]](), + } +} + +// RouteTablesServerTransport connects instances of armnetwork.RouteTablesClient to instances of RouteTablesServer. +// Don't use this type directly, use NewRouteTablesServerTransport instead. +type RouteTablesServerTransport struct { + srv *RouteTablesServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.RouteTablesClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.RouteTablesClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.RouteTablesClientListResponse]] + newListAllPager *tracker[azfake.PagerResponder[armnetwork.RouteTablesClientListAllResponse]] +} + +// Do implements the policy.Transporter interface for RouteTablesServerTransport. 
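For context on how these generated fakes are consumed, the doc comment above states the wiring: the transport is handed to the real armnetwork client through the ClientOptions.Transporter field. A minimal test sketch, assuming the standard azfake helpers (NewTokenCredential, Responder.SetResponse) and the armnetwork.NewRouteTablesClient constructor keep their current signatures:

package fake_test

import (
	"context"
	"net/http"
	"testing"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func TestRouteTablesGet(t *testing.T) {
	// Only the methods the test exercises need to be populated; calls to the
	// others fail with the "not implemented" nonRetriableError seen in the dispatchers.
	srv := fake.RouteTablesServer{
		Get: func(ctx context.Context, resourceGroupName, routeTableName string, options *armnetwork.RouteTablesClientGetOptions) (azfake.Responder[armnetwork.RouteTablesClientGetResponse], azfake.ErrorResponder) {
			var resp azfake.Responder[armnetwork.RouteTablesClientGetResponse]
			resp.SetResponse(http.StatusOK, armnetwork.RouteTablesClientGetResponse{
				RouteTable: armnetwork.RouteTable{Name: to.Ptr(routeTableName)},
			}, nil)
			return resp, azfake.ErrorResponder{}
		},
	}
	client, err := armnetwork.NewRouteTablesClient("subscription-id", azfake.NewTokenCredential(), &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transporter: fake.NewRouteTablesServerTransport(&srv)},
	})
	if err != nil {
		t.Fatal(err)
	}
	got, err := client.Get(context.Background(), "rg", "rt1", nil)
	if err != nil {
		t.Fatal(err)
	}
	if got.Name == nil || *got.Name != "rt1" {
		t.Fatalf("unexpected route table name: %v", got.Name)
	}
}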
+func (r *RouteTablesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "RouteTablesClient.BeginCreateOrUpdate": + resp, err = r.dispatchBeginCreateOrUpdate(req) + case "RouteTablesClient.BeginDelete": + resp, err = r.dispatchBeginDelete(req) + case "RouteTablesClient.Get": + resp, err = r.dispatchGet(req) + case "RouteTablesClient.NewListPager": + resp, err = r.dispatchNewListPager(req) + case "RouteTablesClient.NewListAllPager": + resp, err = r.dispatchNewListAllPager(req) + case "RouteTablesClient.UpdateTags": + resp, err = r.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (r *RouteTablesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if r.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := r.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/routeTables/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.RouteTable](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + routeTableNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeTableName")]) + if err != nil { + return nil, err + } + respr, errRespr := r.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, routeTableNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + r.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + r.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + r.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (r *RouteTablesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if r.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := r.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/routeTables/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + routeTableNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeTableName")]) + if err != nil { + return nil, err + } + respr, errRespr := r.srv.BeginDelete(req.Context(), resourceGroupNameParam, routeTableNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + r.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + r.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + r.beginDelete.remove(req) + } + + return resp, nil +} + +func (r *RouteTablesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if r.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/routeTables/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + routeTableNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeTableName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.RouteTablesClientGetOptions + if expandParam != nil { + options = &armnetwork.RouteTablesClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := r.srv.Get(req.Context(), resourceGroupNameParam, routeTableNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).RouteTable, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (r *RouteTablesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if r.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := r.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/routeTables` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := r.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + r.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.RouteTablesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + r.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + r.newListPager.remove(req) + } + return resp, nil +} + +func (r *RouteTablesServerTransport) dispatchNewListAllPager(req *http.Request) (*http.Response, error) { + if r.srv.NewListAllPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAllPager not implemented")} + } + newListAllPager := r.newListAllPager.get(req) + if newListAllPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/routeTables` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := r.srv.NewListAllPager(nil) + newListAllPager = &resp + r.newListAllPager.add(req, newListAllPager) + server.PagerResponderInjectNextLinks(newListAllPager, req, func(page *armnetwork.RouteTablesClientListAllResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAllPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + r.newListAllPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAllPager) { + r.newListAllPager.remove(req) + } + return resp, nil +} + +func (r *RouteTablesServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if r.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/routeTables/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + routeTableNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeTableName")]) + if err != nil { + return nil, err + } + respr, errRespr := r.srv.UpdateTags(req.Context(), resourceGroupNameParam, routeTableNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).RouteTable, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/routingintent_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/routingintent_server.go new file mode 100644 index 00000000000..cd4682e1796 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/routingintent_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// RoutingIntentServer is a fake server for instances of the armnetwork.RoutingIntentClient type. 
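The dispatchBeginDelete methods above replay a long-running operation from a PollerResponder: PollerResponderNext serves the next polling response and PollerResponderMore decides when the tracked entry can be dropped. A hedged sketch of how a test populates such a responder for the RouteTables fake shown earlier, assuming the azfake PollerResponder methods AddNonTerminalResponse and SetTerminalResponse keep their current shape:

package fake_test

import (
	"context"
	"net/http"

	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

// newDeleteFake returns a RouteTablesServer whose BeginDelete replays one
// intermediate 202 poll before a terminal 200, matching the status codes the
// dispatcher accepts (200, 202, 204).
func newDeleteFake() fake.RouteTablesServer {
	return fake.RouteTablesServer{
		BeginDelete: func(ctx context.Context, resourceGroupName, routeTableName string, options *armnetwork.RouteTablesClientBeginDeleteOptions) (azfake.PollerResponder[armnetwork.RouteTablesClientDeleteResponse], azfake.ErrorResponder) {
			var poller azfake.PollerResponder[armnetwork.RouteTablesClientDeleteResponse]
			poller.AddNonTerminalResponse(http.StatusAccepted, nil)
			poller.SetTerminalResponse(http.StatusOK, armnetwork.RouteTablesClientDeleteResponse{}, nil)
			return poller, azfake.ErrorResponder{}
		},
	}
}

A test would then call BeginDelete on the wired client and poll it to completion as against the real service; once the responder reports no more responses, the dispatcher removes its tracker entry, as the remove calls above show.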
+type RoutingIntentServer struct { + // BeginCreateOrUpdate is the fake for method RoutingIntentClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, virtualHubName string, routingIntentName string, routingIntentParameters armnetwork.RoutingIntent, options *armnetwork.RoutingIntentClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.RoutingIntentClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method RoutingIntentClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, virtualHubName string, routingIntentName string, options *armnetwork.RoutingIntentClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.RoutingIntentClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method RoutingIntentClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, virtualHubName string, routingIntentName string, options *armnetwork.RoutingIntentClientGetOptions) (resp azfake.Responder[armnetwork.RoutingIntentClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method RoutingIntentClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, virtualHubName string, options *armnetwork.RoutingIntentClientListOptions) (resp azfake.PagerResponder[armnetwork.RoutingIntentClientListResponse]) +} + +// NewRoutingIntentServerTransport creates a new instance of RoutingIntentServerTransport with the provided implementation. +// The returned RoutingIntentServerTransport instance is connected to an instance of armnetwork.RoutingIntentClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewRoutingIntentServerTransport(srv *RoutingIntentServer) *RoutingIntentServerTransport { + return &RoutingIntentServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.RoutingIntentClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.RoutingIntentClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.RoutingIntentClientListResponse]](), + } +} + +// RoutingIntentServerTransport connects instances of armnetwork.RoutingIntentClient to instances of RoutingIntentServer. +// Don't use this type directly, use NewRoutingIntentServerTransport instead. +type RoutingIntentServerTransport struct { + srv *RoutingIntentServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.RoutingIntentClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.RoutingIntentClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.RoutingIntentClientListResponse]] +} + +// Do implements the policy.Transporter interface for RoutingIntentServerTransport. 
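Every dispatcher routes its ErrorResponder through server.GetError, which turns a populated error into the HTTP failure the client observes. A sketch of simulating a 404 from the RoutingIntent Get fake defined above, assuming ErrorResponder.SetResponseError keeps its current signature and that the calling test asserts on the resulting *azcore.ResponseError with errors.As:

package fake_test

import (
	"context"
	"net/http"

	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

// newNotFoundFake makes Get fail with a 404 whose error code the test can
// assert on after the error propagates through the client pipeline.
func newNotFoundFake() fake.RoutingIntentServer {
	return fake.RoutingIntentServer{
		Get: func(ctx context.Context, resourceGroupName, virtualHubName, routingIntentName string, options *armnetwork.RoutingIntentClientGetOptions) (azfake.Responder[armnetwork.RoutingIntentClientGetResponse], azfake.ErrorResponder) {
			var errResp azfake.ErrorResponder
			errResp.SetResponseError(http.StatusNotFound, "ResourceNotFound")
			return azfake.Responder[armnetwork.RoutingIntentClientGetResponse]{}, errResp
		},
	}
}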
+func (r *RoutingIntentServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "RoutingIntentClient.BeginCreateOrUpdate": + resp, err = r.dispatchBeginCreateOrUpdate(req) + case "RoutingIntentClient.BeginDelete": + resp, err = r.dispatchBeginDelete(req) + case "RoutingIntentClient.Get": + resp, err = r.dispatchGet(req) + case "RoutingIntentClient.NewListPager": + resp, err = r.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (r *RoutingIntentServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if r.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := r.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/routingIntent/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.RoutingIntent](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + routingIntentNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routingIntentName")]) + if err != nil { + return nil, err + } + respr, errRespr := r.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, virtualHubNameParam, routingIntentNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + r.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + r.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + r.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (r *RoutingIntentServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if r.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := r.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/routingIntent/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + routingIntentNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routingIntentName")]) + if err != nil { + return nil, err + } + respr, errRespr := r.srv.BeginDelete(req.Context(), resourceGroupNameParam, virtualHubNameParam, routingIntentNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + r.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + r.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + r.beginDelete.remove(req) + } + + return resp, nil +} + +func (r *RoutingIntentServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if r.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/routingIntent/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + routingIntentNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routingIntentName")]) + if err != nil { + return nil, err + } + respr, errRespr := r.srv.Get(req.Context(), resourceGroupNameParam, virtualHubNameParam, routingIntentNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).RoutingIntent, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (r *RoutingIntentServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if r.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := r.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/routingIntent` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + resp := r.srv.NewListPager(resourceGroupNameParam, virtualHubNameParam, nil) + newListPager = &resp + r.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.RoutingIntentClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + r.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + r.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/scopeconnections_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/scopeconnections_server.go new file mode 100644 index 00000000000..a60e45f1e66 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/scopeconnections_server.go @@ -0,0 +1,274 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" + "strconv" +) + +// ScopeConnectionsServer is a fake server for instances of the armnetwork.ScopeConnectionsClient type. +type ScopeConnectionsServer struct { + // CreateOrUpdate is the fake for method ScopeConnectionsClient.CreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + CreateOrUpdate func(ctx context.Context, resourceGroupName string, networkManagerName string, scopeConnectionName string, parameters armnetwork.ScopeConnection, options *armnetwork.ScopeConnectionsClientCreateOrUpdateOptions) (resp azfake.Responder[armnetwork.ScopeConnectionsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // Delete is the fake for method ScopeConnectionsClient.Delete + // HTTP status codes to indicate success: http.StatusOK, http.StatusNoContent + Delete func(ctx context.Context, resourceGroupName string, networkManagerName string, scopeConnectionName string, options *armnetwork.ScopeConnectionsClientDeleteOptions) (resp azfake.Responder[armnetwork.ScopeConnectionsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method ScopeConnectionsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, networkManagerName string, scopeConnectionName string, options *armnetwork.ScopeConnectionsClientGetOptions) (resp azfake.Responder[armnetwork.ScopeConnectionsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method ScopeConnectionsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, networkManagerName string, options *armnetwork.ScopeConnectionsClientListOptions) (resp azfake.PagerResponder[armnetwork.ScopeConnectionsClientListResponse]) +} + +// NewScopeConnectionsServerTransport creates a new instance of ScopeConnectionsServerTransport with the provided implementation. +// The returned ScopeConnectionsServerTransport instance is connected to an instance of armnetwork.ScopeConnectionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
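The list dispatchers drive a PagerResponder: PagerResponderInjectNextLinks rewrites each page's NextLink so paging works against the fake, and PagerResponderNext / PagerResponderMore serve and retire pages. A sketch of populating a two-page responder for the ScopeConnections fake, assuming PagerResponder.AddPage keeps its current signature and the ScopeConnectionListResult field names are as in armnetwork:

package fake_test

import (
	"net/http"

	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
)

// listPages returns two pages; the NextLink injected by the dispatcher on the
// first page drives the client to request the second.
func listPages(resourceGroupName, networkManagerName string, options *armnetwork.ScopeConnectionsClientListOptions) azfake.PagerResponder[armnetwork.ScopeConnectionsClientListResponse] {
	var pager azfake.PagerResponder[armnetwork.ScopeConnectionsClientListResponse]
	for _, name := range []string{"conn1", "conn2"} {
		pager.AddPage(http.StatusOK, armnetwork.ScopeConnectionsClientListResponse{
			ScopeConnectionListResult: armnetwork.ScopeConnectionListResult{
				Value: []*armnetwork.ScopeConnection{{Name: to.Ptr(name)}},
			},
		}, nil)
	}
	return pager
}

Assigning this function to fake.ScopeConnectionsServer{NewListPager: listPages} and wiring the transport as before lets the client iterate both pages with its usual More/NextPage loop.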
+func NewScopeConnectionsServerTransport(srv *ScopeConnectionsServer) *ScopeConnectionsServerTransport { + return &ScopeConnectionsServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.ScopeConnectionsClientListResponse]](), + } +} + +// ScopeConnectionsServerTransport connects instances of armnetwork.ScopeConnectionsClient to instances of ScopeConnectionsServer. +// Don't use this type directly, use NewScopeConnectionsServerTransport instead. +type ScopeConnectionsServerTransport struct { + srv *ScopeConnectionsServer + newListPager *tracker[azfake.PagerResponder[armnetwork.ScopeConnectionsClientListResponse]] +} + +// Do implements the policy.Transporter interface for ScopeConnectionsServerTransport. +func (s *ScopeConnectionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ScopeConnectionsClient.CreateOrUpdate": + resp, err = s.dispatchCreateOrUpdate(req) + case "ScopeConnectionsClient.Delete": + resp, err = s.dispatchDelete(req) + case "ScopeConnectionsClient.Get": + resp, err = s.dispatchGet(req) + case "ScopeConnectionsClient.NewListPager": + resp, err = s.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (s *ScopeConnectionsServerTransport) dispatchCreateOrUpdate(req *http.Request) (*http.Response, error) { + if s.srv.CreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method CreateOrUpdate not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/scopeConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.ScopeConnection](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + scopeConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("scopeConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.CreateOrUpdate(req.Context(), resourceGroupNameParam, networkManagerNameParam, scopeConnectionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusCreated}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ScopeConnection, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *ScopeConnectionsServerTransport) dispatchDelete(req *http.Request) (*http.Response, error) { + if s.srv.Delete == nil { + return nil, &nonRetriableError{errors.New("fake for method Delete not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/scopeConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + scopeConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("scopeConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.Delete(req.Context(), resourceGroupNameParam, networkManagerNameParam, scopeConnectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusNoContent}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusNoContent", respContent.HTTPStatus)} + } + resp, err := server.NewResponse(respContent, req, nil) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *ScopeConnectionsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if s.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/scopeConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + scopeConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("scopeConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.Get(req.Context(), resourceGroupNameParam, networkManagerNameParam, scopeConnectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ScopeConnection, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *ScopeConnectionsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if s.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := s.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/scopeConnections` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + topUnescaped, err := url.QueryUnescape(qp.Get("$top")) + if err != nil { + return nil, err + } + topParam, err := parseOptional(topUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + skipTokenUnescaped, err := url.QueryUnescape(qp.Get("$skipToken")) + if err != nil { + return nil, err + } + skipTokenParam := getOptional(skipTokenUnescaped) + var options *armnetwork.ScopeConnectionsClientListOptions + if topParam != nil || skipTokenParam != nil { + options = &armnetwork.ScopeConnectionsClientListOptions{ + Top: topParam, + SkipToken: skipTokenParam, + } + } + resp := s.srv.NewListPager(resourceGroupNameParam, networkManagerNameParam, options) + newListPager = &resp + s.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.ScopeConnectionsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + s.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + s.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/securityadminconfigurations_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/securityadminconfigurations_server.go new file mode 100644 index 00000000000..2daa46b34a1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/securityadminconfigurations_server.go @@ -0,0 +1,302 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. 
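The ScopeConnections list dispatcher above shows how optional query parameters round-trip through the fake: $top and $skipToken are unescaped, parsed with parseOptional/getOptional, and folded into an options struct only when at least one is set. A short illustration of the client side, assuming "client" is a ScopeConnectionsClient wired to the fake transport as in the earlier sketch:

// Client side of the $top/$skipToken round trip.
pager := client.NewListPager("rg", "nm", &armnetwork.ScopeConnectionsClientListOptions{
	Top:       to.Ptr[int32](10),
	SkipToken: to.Ptr("token123"),
})
// The dispatcher parses both values back, so the fake's NewListPager receives
// options with *options.Top == 10 and *options.SkipToken == "token123"; when
// neither query parameter is present it receives nil, matching the
// "topParam != nil || skipTokenParam != nil" guard above.
_ = pager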
+// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" + "strconv" +) + +// SecurityAdminConfigurationsServer is a fake server for instances of the armnetwork.SecurityAdminConfigurationsClient type. +type SecurityAdminConfigurationsServer struct { + // CreateOrUpdate is the fake for method SecurityAdminConfigurationsClient.CreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + CreateOrUpdate func(ctx context.Context, resourceGroupName string, networkManagerName string, configurationName string, securityAdminConfiguration armnetwork.SecurityAdminConfiguration, options *armnetwork.SecurityAdminConfigurationsClientCreateOrUpdateOptions) (resp azfake.Responder[armnetwork.SecurityAdminConfigurationsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method SecurityAdminConfigurationsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, networkManagerName string, configurationName string, options *armnetwork.SecurityAdminConfigurationsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.SecurityAdminConfigurationsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method SecurityAdminConfigurationsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, networkManagerName string, configurationName string, options *armnetwork.SecurityAdminConfigurationsClientGetOptions) (resp azfake.Responder[armnetwork.SecurityAdminConfigurationsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method SecurityAdminConfigurationsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, networkManagerName string, options *armnetwork.SecurityAdminConfigurationsClientListOptions) (resp azfake.PagerResponder[armnetwork.SecurityAdminConfigurationsClientListResponse]) +} + +// NewSecurityAdminConfigurationsServerTransport creates a new instance of SecurityAdminConfigurationsServerTransport with the provided implementation. +// The returned SecurityAdminConfigurationsServerTransport instance is connected to an instance of armnetwork.SecurityAdminConfigurationsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewSecurityAdminConfigurationsServerTransport(srv *SecurityAdminConfigurationsServer) *SecurityAdminConfigurationsServerTransport { + return &SecurityAdminConfigurationsServerTransport{ + srv: srv, + beginDelete: newTracker[azfake.PollerResponder[armnetwork.SecurityAdminConfigurationsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.SecurityAdminConfigurationsClientListResponse]](), + } +} + +// SecurityAdminConfigurationsServerTransport connects instances of armnetwork.SecurityAdminConfigurationsClient to instances of SecurityAdminConfigurationsServer. 
+// Don't use this type directly, use NewSecurityAdminConfigurationsServerTransport instead. +type SecurityAdminConfigurationsServerTransport struct { + srv *SecurityAdminConfigurationsServer + beginDelete *tracker[azfake.PollerResponder[armnetwork.SecurityAdminConfigurationsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.SecurityAdminConfigurationsClientListResponse]] +} + +// Do implements the policy.Transporter interface for SecurityAdminConfigurationsServerTransport. +func (s *SecurityAdminConfigurationsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "SecurityAdminConfigurationsClient.CreateOrUpdate": + resp, err = s.dispatchCreateOrUpdate(req) + case "SecurityAdminConfigurationsClient.BeginDelete": + resp, err = s.dispatchBeginDelete(req) + case "SecurityAdminConfigurationsClient.Get": + resp, err = s.dispatchGet(req) + case "SecurityAdminConfigurationsClient.NewListPager": + resp, err = s.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (s *SecurityAdminConfigurationsServerTransport) dispatchCreateOrUpdate(req *http.Request) (*http.Response, error) { + if s.srv.CreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method CreateOrUpdate not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/securityAdminConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.SecurityAdminConfiguration](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + configurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("configurationName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.CreateOrUpdate(req.Context(), resourceGroupNameParam, networkManagerNameParam, configurationNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusCreated}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).SecurityAdminConfiguration, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *SecurityAdminConfigurationsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if s.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := s.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/securityAdminConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + configurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("configurationName")]) + if err != nil { + return nil, err + } + forceUnescaped, err := url.QueryUnescape(qp.Get("force")) + if err != nil { + return nil, err + } + forceParam, err := parseOptional(forceUnescaped, strconv.ParseBool) + if err != nil { + return nil, err + } + var options *armnetwork.SecurityAdminConfigurationsClientBeginDeleteOptions + if forceParam != nil { + options = &armnetwork.SecurityAdminConfigurationsClientBeginDeleteOptions{ + Force: forceParam, + } + } + respr, errRespr := s.srv.BeginDelete(req.Context(), resourceGroupNameParam, networkManagerNameParam, configurationNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + s.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + s.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + s.beginDelete.remove(req) + } + + return resp, nil +} + +func (s *SecurityAdminConfigurationsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if s.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/securityAdminConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + configurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("configurationName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.Get(req.Context(), resourceGroupNameParam, networkManagerNameParam, configurationNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).SecurityAdminConfiguration, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *SecurityAdminConfigurationsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if s.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := s.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/securityAdminConfigurations` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + topUnescaped, err := url.QueryUnescape(qp.Get("$top")) + if err != nil { + return nil, err + } + topParam, err := parseOptional(topUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + skipTokenUnescaped, err := url.QueryUnescape(qp.Get("$skipToken")) + if err != nil { + return nil, err + } + skipTokenParam := getOptional(skipTokenUnescaped) + var options 
*armnetwork.SecurityAdminConfigurationsClientListOptions + if topParam != nil || skipTokenParam != nil { + options = &armnetwork.SecurityAdminConfigurationsClientListOptions{ + Top: topParam, + SkipToken: skipTokenParam, + } + } + resp := s.srv.NewListPager(resourceGroupNameParam, networkManagerNameParam, options) + newListPager = &resp + s.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.SecurityAdminConfigurationsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + s.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + s.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/securitygroups_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/securitygroups_server.go new file mode 100644 index 00000000000..b7019e4d101 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/securitygroups_server.go @@ -0,0 +1,352 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// SecurityGroupsServer is a fake server for instances of the armnetwork.SecurityGroupsClient type. 
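All of these dispatchers recover path parameters the same way: match the escaped URL path against a pattern with named capture groups, look each group up with SubexpIndex, then PathUnescape the value. A standalone stdlib-only illustration of that technique (the character class is simplified here and the resource names are made up):

package main

import (
	"fmt"
	"net/url"
	"regexp"
)

func main() {
	const pattern = `/subscriptions/(?P<subscriptionId>[^/]+)/resourceGroups/(?P<resourceGroupName>[^/]+)/providers/Microsoft\.Network/networkSecurityGroups/(?P<networkSecurityGroupName>[^/]+)`
	re := regexp.MustCompile(pattern)

	escapedPath := "/subscriptions/0000/resourceGroups/my%20rg/providers/Microsoft.Network/networkSecurityGroups/nsg1"
	matches := re.FindStringSubmatch(escapedPath)
	if matches == nil {
		panic("path did not match")
	}
	rg, err := url.PathUnescape(matches[re.SubexpIndex("resourceGroupName")])
	if err != nil {
		panic(err)
	}
	fmt.Println(rg) // "my rg": escaping is undone after matching, as in the dispatchers
}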
+type SecurityGroupsServer struct { + // BeginCreateOrUpdate is the fake for method SecurityGroupsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, parameters armnetwork.SecurityGroup, options *armnetwork.SecurityGroupsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.SecurityGroupsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method SecurityGroupsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, options *armnetwork.SecurityGroupsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.SecurityGroupsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method SecurityGroupsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, options *armnetwork.SecurityGroupsClientGetOptions) (resp azfake.Responder[armnetwork.SecurityGroupsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method SecurityGroupsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armnetwork.SecurityGroupsClientListOptions) (resp azfake.PagerResponder[armnetwork.SecurityGroupsClientListResponse]) + + // NewListAllPager is the fake for method SecurityGroupsClient.NewListAllPager + // HTTP status codes to indicate success: http.StatusOK + NewListAllPager func(options *armnetwork.SecurityGroupsClientListAllOptions) (resp azfake.PagerResponder[armnetwork.SecurityGroupsClientListAllResponse]) + + // UpdateTags is the fake for method SecurityGroupsClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, parameters armnetwork.TagsObject, options *armnetwork.SecurityGroupsClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.SecurityGroupsClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewSecurityGroupsServerTransport creates a new instance of SecurityGroupsServerTransport with the provided implementation. +// The returned SecurityGroupsServerTransport instance is connected to an instance of armnetwork.SecurityGroupsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewSecurityGroupsServerTransport(srv *SecurityGroupsServer) *SecurityGroupsServerTransport { + return &SecurityGroupsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.SecurityGroupsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.SecurityGroupsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.SecurityGroupsClientListResponse]](), + newListAllPager: newTracker[azfake.PagerResponder[armnetwork.SecurityGroupsClientListAllResponse]](), + } +} + +// SecurityGroupsServerTransport connects instances of armnetwork.SecurityGroupsClient to instances of SecurityGroupsServer. +// Don't use this type directly, use NewSecurityGroupsServerTransport instead. 
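For orientation while reviewing these generated fakes, a minimal sketch of how a SecurityGroupsServer is typically consumed in a test: implement only the operations the test needs, then inject the generated transport through the client options. The subscription ID, resource names, and response payload below are illustrative assumptions; the option field used to inject the transport is azcore.ClientOptions.Transport (of type policy.Transporter), which the generated doc comments refer to as the "Transporter" field.

package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func main() {
	// Only the operations the test exercises need to be implemented; calls to
	// unimplemented operations return a non-retriable "not implemented" error.
	srv := fake.SecurityGroupsServer{
		Get: func(ctx context.Context, resourceGroupName, networkSecurityGroupName string, options *armnetwork.SecurityGroupsClientGetOptions) (azfake.Responder[armnetwork.SecurityGroupsClientGetResponse], azfake.ErrorResponder) {
			// Echo the requested name back in a canned 200 response.
			var resp azfake.Responder[armnetwork.SecurityGroupsClientGetResponse]
			resp.SetResponse(http.StatusOK, armnetwork.SecurityGroupsClientGetResponse{
				SecurityGroup: armnetwork.SecurityGroup{Name: to.Ptr(networkSecurityGroupName)},
			}, nil)
			return resp, azfake.ErrorResponder{}
		},
	}

	// Route the client's HTTP traffic through the fake server transport.
	client, err := armnetwork.NewSecurityGroupsClient("00000000-0000-0000-0000-000000000000", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transport: fake.NewSecurityGroupsServerTransport(&srv)},
	})
	if err != nil {
		log.Fatal(err)
	}

	got, err := client.Get(context.Background(), "example-rg", "example-nsg", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*got.Name) // prints "example-nsg"
}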
+type SecurityGroupsServerTransport struct { + srv *SecurityGroupsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.SecurityGroupsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.SecurityGroupsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.SecurityGroupsClientListResponse]] + newListAllPager *tracker[azfake.PagerResponder[armnetwork.SecurityGroupsClientListAllResponse]] +} + +// Do implements the policy.Transporter interface for SecurityGroupsServerTransport. +func (s *SecurityGroupsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "SecurityGroupsClient.BeginCreateOrUpdate": + resp, err = s.dispatchBeginCreateOrUpdate(req) + case "SecurityGroupsClient.BeginDelete": + resp, err = s.dispatchBeginDelete(req) + case "SecurityGroupsClient.Get": + resp, err = s.dispatchGet(req) + case "SecurityGroupsClient.NewListPager": + resp, err = s.dispatchNewListPager(req) + case "SecurityGroupsClient.NewListAllPager": + resp, err = s.dispatchNewListAllPager(req) + case "SecurityGroupsClient.UpdateTags": + resp, err = s.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (s *SecurityGroupsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if s.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := s.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkSecurityGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.SecurityGroup](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkSecurityGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkSecurityGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, networkSecurityGroupNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + s.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + s.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + s.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (s *SecurityGroupsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if s.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := s.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkSecurityGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkSecurityGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkSecurityGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.BeginDelete(req.Context(), resourceGroupNameParam, networkSecurityGroupNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + s.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + s.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + s.beginDelete.remove(req) + } + + return resp, nil +} + +func (s *SecurityGroupsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if s.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkSecurityGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkSecurityGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkSecurityGroupName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.SecurityGroupsClientGetOptions + if expandParam != nil { + options = &armnetwork.SecurityGroupsClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := s.srv.Get(req.Context(), resourceGroupNameParam, networkSecurityGroupNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).SecurityGroup, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *SecurityGroupsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if s.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := s.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkSecurityGroups` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := s.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + s.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.SecurityGroupsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + s.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + s.newListPager.remove(req) + } + return resp, nil +} + +func (s *SecurityGroupsServerTransport) dispatchNewListAllPager(req *http.Request) (*http.Response, error) { + if s.srv.NewListAllPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAllPager not implemented")} + } + newListAllPager := s.newListAllPager.get(req) + if newListAllPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkSecurityGroups` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := s.srv.NewListAllPager(nil) + newListAllPager = &resp + s.newListAllPager.add(req, newListAllPager) + server.PagerResponderInjectNextLinks(newListAllPager, req, func(page *armnetwork.SecurityGroupsClientListAllResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAllPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + s.newListAllPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAllPager) { + s.newListAllPager.remove(req) + } + return resp, nil +} + +func (s *SecurityGroupsServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if s.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkSecurityGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkSecurityGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkSecurityGroupName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.UpdateTags(req.Context(), resourceGroupNameParam, networkSecurityGroupNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).SecurityGroup, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/securitypartnerproviders_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/securitypartnerproviders_server.go new file mode 100644 index 00000000000..40c931d4edd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/securitypartnerproviders_server.go @@ -0,0 +1,340 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// SecurityPartnerProvidersServer is a fake server for instances of the armnetwork.SecurityPartnerProvidersClient type. +type SecurityPartnerProvidersServer struct { + // BeginCreateOrUpdate is the fake for method SecurityPartnerProvidersClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, securityPartnerProviderName string, parameters armnetwork.SecurityPartnerProvider, options *armnetwork.SecurityPartnerProvidersClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.SecurityPartnerProvidersClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method SecurityPartnerProvidersClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, securityPartnerProviderName string, options *armnetwork.SecurityPartnerProvidersClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.SecurityPartnerProvidersClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method SecurityPartnerProvidersClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, securityPartnerProviderName string, options *armnetwork.SecurityPartnerProvidersClientGetOptions) (resp azfake.Responder[armnetwork.SecurityPartnerProvidersClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method SecurityPartnerProvidersClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armnetwork.SecurityPartnerProvidersClientListOptions) (resp azfake.PagerResponder[armnetwork.SecurityPartnerProvidersClientListResponse]) + + // NewListByResourceGroupPager is the fake for method SecurityPartnerProvidersClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + 
NewListByResourceGroupPager func(resourceGroupName string, options *armnetwork.SecurityPartnerProvidersClientListByResourceGroupOptions) (resp azfake.PagerResponder[armnetwork.SecurityPartnerProvidersClientListByResourceGroupResponse]) + + // UpdateTags is the fake for method SecurityPartnerProvidersClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, securityPartnerProviderName string, parameters armnetwork.TagsObject, options *armnetwork.SecurityPartnerProvidersClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.SecurityPartnerProvidersClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewSecurityPartnerProvidersServerTransport creates a new instance of SecurityPartnerProvidersServerTransport with the provided implementation. +// The returned SecurityPartnerProvidersServerTransport instance is connected to an instance of armnetwork.SecurityPartnerProvidersClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewSecurityPartnerProvidersServerTransport(srv *SecurityPartnerProvidersServer) *SecurityPartnerProvidersServerTransport { + return &SecurityPartnerProvidersServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.SecurityPartnerProvidersClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.SecurityPartnerProvidersClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.SecurityPartnerProvidersClientListResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armnetwork.SecurityPartnerProvidersClientListByResourceGroupResponse]](), + } +} + +// SecurityPartnerProvidersServerTransport connects instances of armnetwork.SecurityPartnerProvidersClient to instances of SecurityPartnerProvidersServer. +// Don't use this type directly, use NewSecurityPartnerProvidersServerTransport instead. +type SecurityPartnerProvidersServerTransport struct { + srv *SecurityPartnerProvidersServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.SecurityPartnerProvidersClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.SecurityPartnerProvidersClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.SecurityPartnerProvidersClientListResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armnetwork.SecurityPartnerProvidersClientListByResourceGroupResponse]] +} + +// Do implements the policy.Transporter interface for SecurityPartnerProvidersServerTransport. 
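The long-running operations are exercised the same way: the fake returns an azfake.PollerResponder that the test seeds with optional intermediate polls and a terminal response, and the generated transport replays them to the client's poller. A minimal sketch under assumed names and poll counts:

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func main() {
	srv := fake.SecurityPartnerProvidersServer{
		BeginDelete: func(ctx context.Context, resourceGroupName, securityPartnerProviderName string, options *armnetwork.SecurityPartnerProvidersClientBeginDeleteOptions) (azfake.PollerResponder[armnetwork.SecurityPartnerProvidersClientDeleteResponse], azfake.ErrorResponder) {
			var lro azfake.PollerResponder[armnetwork.SecurityPartnerProvidersClientDeleteResponse]
			// One in-progress poll, then the terminal 200 response.
			lro.AddNonTerminalResponse(http.StatusOK, nil)
			lro.SetTerminalResponse(http.StatusOK, armnetwork.SecurityPartnerProvidersClientDeleteResponse{}, nil)
			return lro, azfake.ErrorResponder{}
		},
	}

	client, err := armnetwork.NewSecurityPartnerProvidersClient("00000000-0000-0000-0000-000000000000", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transport: fake.NewSecurityPartnerProvidersServerTransport(&srv)},
	})
	if err != nil {
		log.Fatal(err)
	}

	poller, err := client.BeginDelete(context.Background(), "example-rg", "example-provider", nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := poller.PollUntilDone(context.Background(), nil); err != nil {
		log.Fatal(err)
	}
}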
+func (s *SecurityPartnerProvidersServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "SecurityPartnerProvidersClient.BeginCreateOrUpdate": + resp, err = s.dispatchBeginCreateOrUpdate(req) + case "SecurityPartnerProvidersClient.BeginDelete": + resp, err = s.dispatchBeginDelete(req) + case "SecurityPartnerProvidersClient.Get": + resp, err = s.dispatchGet(req) + case "SecurityPartnerProvidersClient.NewListPager": + resp, err = s.dispatchNewListPager(req) + case "SecurityPartnerProvidersClient.NewListByResourceGroupPager": + resp, err = s.dispatchNewListByResourceGroupPager(req) + case "SecurityPartnerProvidersClient.UpdateTags": + resp, err = s.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (s *SecurityPartnerProvidersServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if s.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := s.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/securityPartnerProviders/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.SecurityPartnerProvider](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + securityPartnerProviderNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("securityPartnerProviderName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, securityPartnerProviderNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + s.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + s.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + s.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (s *SecurityPartnerProvidersServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if s.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := s.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/securityPartnerProviders/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + securityPartnerProviderNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("securityPartnerProviderName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.BeginDelete(req.Context(), resourceGroupNameParam, securityPartnerProviderNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + s.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + s.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + s.beginDelete.remove(req) + } + + return resp, nil +} + +func (s *SecurityPartnerProvidersServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if s.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/securityPartnerProviders/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + securityPartnerProviderNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("securityPartnerProviderName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.Get(req.Context(), resourceGroupNameParam, securityPartnerProviderNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).SecurityPartnerProvider, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *SecurityPartnerProvidersServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if s.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := s.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/securityPartnerProviders` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := s.srv.NewListPager(nil) + newListPager = &resp + s.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.SecurityPartnerProvidersClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + s.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + s.newListPager.remove(req) + } + return resp, nil +} + +func (s *SecurityPartnerProvidersServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if s.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := s.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/securityPartnerProviders` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := s.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + s.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armnetwork.SecurityPartnerProvidersClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + s.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + s.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (s *SecurityPartnerProvidersServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if s.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/securityPartnerProviders/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + securityPartnerProviderNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("securityPartnerProviderName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.UpdateTags(req.Context(), resourceGroupNameParam, securityPartnerProviderNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).SecurityPartnerProvider, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/securityrules_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/securityrules_server.go new file mode 100644 index 00000000000..b295e6366a3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/securityrules_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// SecurityRulesServer is a fake server for instances of the armnetwork.SecurityRulesClient type. 
+type SecurityRulesServer struct { + // BeginCreateOrUpdate is the fake for method SecurityRulesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, securityRuleName string, securityRuleParameters armnetwork.SecurityRule, options *armnetwork.SecurityRulesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.SecurityRulesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method SecurityRulesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, securityRuleName string, options *armnetwork.SecurityRulesClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.SecurityRulesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method SecurityRulesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, securityRuleName string, options *armnetwork.SecurityRulesClientGetOptions) (resp azfake.Responder[armnetwork.SecurityRulesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method SecurityRulesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, networkSecurityGroupName string, options *armnetwork.SecurityRulesClientListOptions) (resp azfake.PagerResponder[armnetwork.SecurityRulesClientListResponse]) +} + +// NewSecurityRulesServerTransport creates a new instance of SecurityRulesServerTransport with the provided implementation. +// The returned SecurityRulesServerTransport instance is connected to an instance of armnetwork.SecurityRulesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewSecurityRulesServerTransport(srv *SecurityRulesServer) *SecurityRulesServerTransport { + return &SecurityRulesServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.SecurityRulesClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.SecurityRulesClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.SecurityRulesClientListResponse]](), + } +} + +// SecurityRulesServerTransport connects instances of armnetwork.SecurityRulesClient to instances of SecurityRulesServer. +// Don't use this type directly, use NewSecurityRulesServerTransport instead. +type SecurityRulesServerTransport struct { + srv *SecurityRulesServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.SecurityRulesClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.SecurityRulesClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.SecurityRulesClientListResponse]] +} + +// Do implements the policy.Transporter interface for SecurityRulesServerTransport. 
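For the paged List fakes, the test seeds an azfake.PagerResponder with one or more pages and the generated transport stitches in nextLink values between them (via PagerResponderInjectNextLinks, as in the dispatch code below). A minimal sketch; the rule names and page split are illustrative assumptions:

package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func main() {
	srv := fake.SecurityRulesServer{
		NewListPager: func(resourceGroupName, networkSecurityGroupName string, options *armnetwork.SecurityRulesClientListOptions) azfake.PagerResponder[armnetwork.SecurityRulesClientListResponse] {
			var pages azfake.PagerResponder[armnetwork.SecurityRulesClientListResponse]
			// Two pages of one rule each; the fake transport wires up the nextLink.
			pages.AddPage(http.StatusOK, armnetwork.SecurityRulesClientListResponse{
				SecurityRuleListResult: armnetwork.SecurityRuleListResult{
					Value: []*armnetwork.SecurityRule{{Name: to.Ptr("allow-ssh")}},
				},
			}, nil)
			pages.AddPage(http.StatusOK, armnetwork.SecurityRulesClientListResponse{
				SecurityRuleListResult: armnetwork.SecurityRuleListResult{
					Value: []*armnetwork.SecurityRule{{Name: to.Ptr("allow-https")}},
				},
			}, nil)
			return pages
		},
	}

	client, err := armnetwork.NewSecurityRulesClient("00000000-0000-0000-0000-000000000000", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transport: fake.NewSecurityRulesServerTransport(&srv)},
	})
	if err != nil {
		log.Fatal(err)
	}

	pager := client.NewListPager("example-rg", "example-nsg", nil)
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		for _, rule := range page.Value {
			fmt.Println(*rule.Name)
		}
	}
}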
+func (s *SecurityRulesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "SecurityRulesClient.BeginCreateOrUpdate": + resp, err = s.dispatchBeginCreateOrUpdate(req) + case "SecurityRulesClient.BeginDelete": + resp, err = s.dispatchBeginDelete(req) + case "SecurityRulesClient.Get": + resp, err = s.dispatchGet(req) + case "SecurityRulesClient.NewListPager": + resp, err = s.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (s *SecurityRulesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if s.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := s.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkSecurityGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/securityRules/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.SecurityRule](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkSecurityGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkSecurityGroupName")]) + if err != nil { + return nil, err + } + securityRuleNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("securityRuleName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, networkSecurityGroupNameParam, securityRuleNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + s.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + s.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + s.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (s *SecurityRulesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if s.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := s.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkSecurityGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/securityRules/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkSecurityGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkSecurityGroupName")]) + if err != nil { + return nil, err + } + securityRuleNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("securityRuleName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.BeginDelete(req.Context(), resourceGroupNameParam, networkSecurityGroupNameParam, securityRuleNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + s.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + s.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + s.beginDelete.remove(req) + } + + return resp, nil +} + +func (s *SecurityRulesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if s.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkSecurityGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/securityRules/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkSecurityGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkSecurityGroupName")]) + if err != nil { + return nil, err + } + securityRuleNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("securityRuleName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.Get(req.Context(), resourceGroupNameParam, networkSecurityGroupNameParam, securityRuleNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).SecurityRule, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *SecurityRulesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if s.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := s.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkSecurityGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/securityRules` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkSecurityGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkSecurityGroupName")]) + if err != nil { + return nil, err + } + resp := s.srv.NewListPager(resourceGroupNameParam, networkSecurityGroupNameParam, nil) + newListPager = &resp + s.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.SecurityRulesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + s.newListPager.remove(req) + return nil, 
&nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + s.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/server_factory.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/server_factory.go new file mode 100644 index 00000000000..00ddd9d46ce --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/server_factory.go @@ -0,0 +1,949 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "errors" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "strings" + "sync" +) + +// ServerFactory is a fake server for instances of the armnetwork.ClientFactory type. +type ServerFactory struct { + AdminRuleCollectionsServer AdminRuleCollectionsServer + AdminRulesServer AdminRulesServer + ApplicationGatewayPrivateEndpointConnectionsServer ApplicationGatewayPrivateEndpointConnectionsServer + ApplicationGatewayPrivateLinkResourcesServer ApplicationGatewayPrivateLinkResourcesServer + ApplicationGatewayWafDynamicManifestsServer ApplicationGatewayWafDynamicManifestsServer + ApplicationGatewayWafDynamicManifestsDefaultServer ApplicationGatewayWafDynamicManifestsDefaultServer + ApplicationGatewaysServer ApplicationGatewaysServer + ApplicationSecurityGroupsServer ApplicationSecurityGroupsServer + AvailableDelegationsServer AvailableDelegationsServer + AvailableEndpointServicesServer AvailableEndpointServicesServer + AvailablePrivateEndpointTypesServer AvailablePrivateEndpointTypesServer + AvailableResourceGroupDelegationsServer AvailableResourceGroupDelegationsServer + AvailableServiceAliasesServer AvailableServiceAliasesServer + AzureFirewallFqdnTagsServer AzureFirewallFqdnTagsServer + AzureFirewallsServer AzureFirewallsServer + BastionHostsServer BastionHostsServer + BgpServiceCommunitiesServer BgpServiceCommunitiesServer + ConfigurationPolicyGroupsServer ConfigurationPolicyGroupsServer + ConnectionMonitorsServer ConnectionMonitorsServer + ConnectivityConfigurationsServer ConnectivityConfigurationsServer + CustomIPPrefixesServer CustomIPPrefixesServer + DdosCustomPoliciesServer DdosCustomPoliciesServer + DdosProtectionPlansServer DdosProtectionPlansServer + DefaultSecurityRulesServer DefaultSecurityRulesServer + DscpConfigurationServer DscpConfigurationServer + ExpressRouteCircuitAuthorizationsServer ExpressRouteCircuitAuthorizationsServer + ExpressRouteCircuitConnectionsServer ExpressRouteCircuitConnectionsServer + ExpressRouteCircuitPeeringsServer ExpressRouteCircuitPeeringsServer + ExpressRouteCircuitsServer ExpressRouteCircuitsServer + ExpressRouteConnectionsServer ExpressRouteConnectionsServer + ExpressRouteCrossConnectionPeeringsServer ExpressRouteCrossConnectionPeeringsServer + ExpressRouteCrossConnectionsServer ExpressRouteCrossConnectionsServer + ExpressRouteGatewaysServer ExpressRouteGatewaysServer + ExpressRouteLinksServer ExpressRouteLinksServer + ExpressRoutePortAuthorizationsServer ExpressRoutePortAuthorizationsServer + 
ExpressRoutePortsServer ExpressRoutePortsServer + ExpressRoutePortsLocationsServer ExpressRoutePortsLocationsServer + ExpressRouteProviderPortsLocationServer ExpressRouteProviderPortsLocationServer + ExpressRouteServiceProvidersServer ExpressRouteServiceProvidersServer + FirewallPoliciesServer FirewallPoliciesServer + FirewallPolicyIdpsSignaturesServer FirewallPolicyIdpsSignaturesServer + FirewallPolicyIdpsSignaturesFilterValuesServer FirewallPolicyIdpsSignaturesFilterValuesServer + FirewallPolicyIdpsSignaturesOverridesServer FirewallPolicyIdpsSignaturesOverridesServer + FirewallPolicyRuleCollectionGroupsServer FirewallPolicyRuleCollectionGroupsServer + FlowLogsServer FlowLogsServer + GroupsServer GroupsServer + HubRouteTablesServer HubRouteTablesServer + HubVirtualNetworkConnectionsServer HubVirtualNetworkConnectionsServer + IPAllocationsServer IPAllocationsServer + IPGroupsServer IPGroupsServer + InboundNatRulesServer InboundNatRulesServer + InboundSecurityRuleServer InboundSecurityRuleServer + InterfaceIPConfigurationsServer InterfaceIPConfigurationsServer + InterfaceLoadBalancersServer InterfaceLoadBalancersServer + InterfaceTapConfigurationsServer InterfaceTapConfigurationsServer + InterfacesServer InterfacesServer + LoadBalancerBackendAddressPoolsServer LoadBalancerBackendAddressPoolsServer + LoadBalancerFrontendIPConfigurationsServer LoadBalancerFrontendIPConfigurationsServer + LoadBalancerLoadBalancingRulesServer LoadBalancerLoadBalancingRulesServer + LoadBalancerNetworkInterfacesServer LoadBalancerNetworkInterfacesServer + LoadBalancerOutboundRulesServer LoadBalancerOutboundRulesServer + LoadBalancerProbesServer LoadBalancerProbesServer + LoadBalancersServer LoadBalancersServer + LocalNetworkGatewaysServer LocalNetworkGatewaysServer + ManagementServer ManagementServer + ManagementGroupNetworkManagerConnectionsServer ManagementGroupNetworkManagerConnectionsServer + ManagerCommitsServer ManagerCommitsServer + ManagerDeploymentStatusServer ManagerDeploymentStatusServer + ManagersServer ManagersServer + NatGatewaysServer NatGatewaysServer + NatRulesServer NatRulesServer + OperationsServer OperationsServer + P2SVPNGatewaysServer P2SVPNGatewaysServer + PacketCapturesServer PacketCapturesServer + PeerExpressRouteCircuitConnectionsServer PeerExpressRouteCircuitConnectionsServer + PrivateDNSZoneGroupsServer PrivateDNSZoneGroupsServer + PrivateEndpointsServer PrivateEndpointsServer + PrivateLinkServicesServer PrivateLinkServicesServer + ProfilesServer ProfilesServer + PublicIPAddressesServer PublicIPAddressesServer + PublicIPPrefixesServer PublicIPPrefixesServer + ResourceNavigationLinksServer ResourceNavigationLinksServer + RouteFilterRulesServer RouteFilterRulesServer + RouteFiltersServer RouteFiltersServer + RouteMapsServer RouteMapsServer + RouteTablesServer RouteTablesServer + RoutesServer RoutesServer + RoutingIntentServer RoutingIntentServer + ScopeConnectionsServer ScopeConnectionsServer + SecurityAdminConfigurationsServer SecurityAdminConfigurationsServer + SecurityGroupsServer SecurityGroupsServer + SecurityPartnerProvidersServer SecurityPartnerProvidersServer + SecurityRulesServer SecurityRulesServer + ServiceAssociationLinksServer ServiceAssociationLinksServer + ServiceEndpointPoliciesServer ServiceEndpointPoliciesServer + ServiceEndpointPolicyDefinitionsServer ServiceEndpointPolicyDefinitionsServer + ServiceTagInformationServer ServiceTagInformationServer + ServiceTagsServer ServiceTagsServer + StaticMembersServer StaticMembersServer + SubnetsServer SubnetsServer + 
SubscriptionNetworkManagerConnectionsServer SubscriptionNetworkManagerConnectionsServer + UsagesServer UsagesServer + VPNConnectionsServer VPNConnectionsServer + VPNGatewaysServer VPNGatewaysServer + VPNLinkConnectionsServer VPNLinkConnectionsServer + VPNServerConfigurationsAssociatedWithVirtualWanServer VPNServerConfigurationsAssociatedWithVirtualWanServer + VPNServerConfigurationsServer VPNServerConfigurationsServer + VPNSiteLinkConnectionsServer VPNSiteLinkConnectionsServer + VPNSiteLinksServer VPNSiteLinksServer + VPNSitesServer VPNSitesServer + VPNSitesConfigurationServer VPNSitesConfigurationServer + VipSwapServer VipSwapServer + VirtualApplianceConnectionsServer VirtualApplianceConnectionsServer + VirtualApplianceSKUsServer VirtualApplianceSKUsServer + VirtualApplianceSitesServer VirtualApplianceSitesServer + VirtualAppliancesServer VirtualAppliancesServer + VirtualHubBgpConnectionServer VirtualHubBgpConnectionServer + VirtualHubBgpConnectionsServer VirtualHubBgpConnectionsServer + VirtualHubIPConfigurationServer VirtualHubIPConfigurationServer + VirtualHubRouteTableV2SServer VirtualHubRouteTableV2SServer + VirtualHubsServer VirtualHubsServer + VirtualNetworkGatewayConnectionsServer VirtualNetworkGatewayConnectionsServer + VirtualNetworkGatewayNatRulesServer VirtualNetworkGatewayNatRulesServer + VirtualNetworkGatewaysServer VirtualNetworkGatewaysServer + VirtualNetworkPeeringsServer VirtualNetworkPeeringsServer + VirtualNetworkTapsServer VirtualNetworkTapsServer + VirtualNetworksServer VirtualNetworksServer + VirtualRouterPeeringsServer VirtualRouterPeeringsServer + VirtualRoutersServer VirtualRoutersServer + VirtualWansServer VirtualWansServer + WatchersServer WatchersServer + WebApplicationFirewallPoliciesServer WebApplicationFirewallPoliciesServer + WebCategoriesServer WebCategoriesServer +} + +// NewServerFactoryTransport creates a new instance of ServerFactoryTransport with the provided implementation. +// The returned ServerFactoryTransport instance is connected to an instance of armnetwork.ClientFactory via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewServerFactoryTransport(srv *ServerFactory) *ServerFactoryTransport { + return &ServerFactoryTransport{ + srv: srv, + } +} + +// ServerFactoryTransport connects instances of armnetwork.ClientFactory to instances of ServerFactory. +// Don't use this type directly, use NewServerFactoryTransport instead. 
+type ServerFactoryTransport struct { + srv *ServerFactory + trMu sync.Mutex + trAdminRuleCollectionsServer *AdminRuleCollectionsServerTransport + trAdminRulesServer *AdminRulesServerTransport + trApplicationGatewayPrivateEndpointConnectionsServer *ApplicationGatewayPrivateEndpointConnectionsServerTransport + trApplicationGatewayPrivateLinkResourcesServer *ApplicationGatewayPrivateLinkResourcesServerTransport + trApplicationGatewayWafDynamicManifestsServer *ApplicationGatewayWafDynamicManifestsServerTransport + trApplicationGatewayWafDynamicManifestsDefaultServer *ApplicationGatewayWafDynamicManifestsDefaultServerTransport + trApplicationGatewaysServer *ApplicationGatewaysServerTransport + trApplicationSecurityGroupsServer *ApplicationSecurityGroupsServerTransport + trAvailableDelegationsServer *AvailableDelegationsServerTransport + trAvailableEndpointServicesServer *AvailableEndpointServicesServerTransport + trAvailablePrivateEndpointTypesServer *AvailablePrivateEndpointTypesServerTransport + trAvailableResourceGroupDelegationsServer *AvailableResourceGroupDelegationsServerTransport + trAvailableServiceAliasesServer *AvailableServiceAliasesServerTransport + trAzureFirewallFqdnTagsServer *AzureFirewallFqdnTagsServerTransport + trAzureFirewallsServer *AzureFirewallsServerTransport + trBastionHostsServer *BastionHostsServerTransport + trBgpServiceCommunitiesServer *BgpServiceCommunitiesServerTransport + trConfigurationPolicyGroupsServer *ConfigurationPolicyGroupsServerTransport + trConnectionMonitorsServer *ConnectionMonitorsServerTransport + trConnectivityConfigurationsServer *ConnectivityConfigurationsServerTransport + trCustomIPPrefixesServer *CustomIPPrefixesServerTransport + trDdosCustomPoliciesServer *DdosCustomPoliciesServerTransport + trDdosProtectionPlansServer *DdosProtectionPlansServerTransport + trDefaultSecurityRulesServer *DefaultSecurityRulesServerTransport + trDscpConfigurationServer *DscpConfigurationServerTransport + trExpressRouteCircuitAuthorizationsServer *ExpressRouteCircuitAuthorizationsServerTransport + trExpressRouteCircuitConnectionsServer *ExpressRouteCircuitConnectionsServerTransport + trExpressRouteCircuitPeeringsServer *ExpressRouteCircuitPeeringsServerTransport + trExpressRouteCircuitsServer *ExpressRouteCircuitsServerTransport + trExpressRouteConnectionsServer *ExpressRouteConnectionsServerTransport + trExpressRouteCrossConnectionPeeringsServer *ExpressRouteCrossConnectionPeeringsServerTransport + trExpressRouteCrossConnectionsServer *ExpressRouteCrossConnectionsServerTransport + trExpressRouteGatewaysServer *ExpressRouteGatewaysServerTransport + trExpressRouteLinksServer *ExpressRouteLinksServerTransport + trExpressRoutePortAuthorizationsServer *ExpressRoutePortAuthorizationsServerTransport + trExpressRoutePortsServer *ExpressRoutePortsServerTransport + trExpressRoutePortsLocationsServer *ExpressRoutePortsLocationsServerTransport + trExpressRouteProviderPortsLocationServer *ExpressRouteProviderPortsLocationServerTransport + trExpressRouteServiceProvidersServer *ExpressRouteServiceProvidersServerTransport + trFirewallPoliciesServer *FirewallPoliciesServerTransport + trFirewallPolicyIdpsSignaturesServer *FirewallPolicyIdpsSignaturesServerTransport + trFirewallPolicyIdpsSignaturesFilterValuesServer *FirewallPolicyIdpsSignaturesFilterValuesServerTransport + trFirewallPolicyIdpsSignaturesOverridesServer *FirewallPolicyIdpsSignaturesOverridesServerTransport + trFirewallPolicyRuleCollectionGroupsServer *FirewallPolicyRuleCollectionGroupsServerTransport + 
trFlowLogsServer *FlowLogsServerTransport + trGroupsServer *GroupsServerTransport + trHubRouteTablesServer *HubRouteTablesServerTransport + trHubVirtualNetworkConnectionsServer *HubVirtualNetworkConnectionsServerTransport + trIPAllocationsServer *IPAllocationsServerTransport + trIPGroupsServer *IPGroupsServerTransport + trInboundNatRulesServer *InboundNatRulesServerTransport + trInboundSecurityRuleServer *InboundSecurityRuleServerTransport + trInterfaceIPConfigurationsServer *InterfaceIPConfigurationsServerTransport + trInterfaceLoadBalancersServer *InterfaceLoadBalancersServerTransport + trInterfaceTapConfigurationsServer *InterfaceTapConfigurationsServerTransport + trInterfacesServer *InterfacesServerTransport + trLoadBalancerBackendAddressPoolsServer *LoadBalancerBackendAddressPoolsServerTransport + trLoadBalancerFrontendIPConfigurationsServer *LoadBalancerFrontendIPConfigurationsServerTransport + trLoadBalancerLoadBalancingRulesServer *LoadBalancerLoadBalancingRulesServerTransport + trLoadBalancerNetworkInterfacesServer *LoadBalancerNetworkInterfacesServerTransport + trLoadBalancerOutboundRulesServer *LoadBalancerOutboundRulesServerTransport + trLoadBalancerProbesServer *LoadBalancerProbesServerTransport + trLoadBalancersServer *LoadBalancersServerTransport + trLocalNetworkGatewaysServer *LocalNetworkGatewaysServerTransport + trManagementServer *ManagementServerTransport + trManagementGroupNetworkManagerConnectionsServer *ManagementGroupNetworkManagerConnectionsServerTransport + trManagerCommitsServer *ManagerCommitsServerTransport + trManagerDeploymentStatusServer *ManagerDeploymentStatusServerTransport + trManagersServer *ManagersServerTransport + trNatGatewaysServer *NatGatewaysServerTransport + trNatRulesServer *NatRulesServerTransport + trOperationsServer *OperationsServerTransport + trP2SVPNGatewaysServer *P2SVPNGatewaysServerTransport + trPacketCapturesServer *PacketCapturesServerTransport + trPeerExpressRouteCircuitConnectionsServer *PeerExpressRouteCircuitConnectionsServerTransport + trPrivateDNSZoneGroupsServer *PrivateDNSZoneGroupsServerTransport + trPrivateEndpointsServer *PrivateEndpointsServerTransport + trPrivateLinkServicesServer *PrivateLinkServicesServerTransport + trProfilesServer *ProfilesServerTransport + trPublicIPAddressesServer *PublicIPAddressesServerTransport + trPublicIPPrefixesServer *PublicIPPrefixesServerTransport + trResourceNavigationLinksServer *ResourceNavigationLinksServerTransport + trRouteFilterRulesServer *RouteFilterRulesServerTransport + trRouteFiltersServer *RouteFiltersServerTransport + trRouteMapsServer *RouteMapsServerTransport + trRouteTablesServer *RouteTablesServerTransport + trRoutesServer *RoutesServerTransport + trRoutingIntentServer *RoutingIntentServerTransport + trScopeConnectionsServer *ScopeConnectionsServerTransport + trSecurityAdminConfigurationsServer *SecurityAdminConfigurationsServerTransport + trSecurityGroupsServer *SecurityGroupsServerTransport + trSecurityPartnerProvidersServer *SecurityPartnerProvidersServerTransport + trSecurityRulesServer *SecurityRulesServerTransport + trServiceAssociationLinksServer *ServiceAssociationLinksServerTransport + trServiceEndpointPoliciesServer *ServiceEndpointPoliciesServerTransport + trServiceEndpointPolicyDefinitionsServer *ServiceEndpointPolicyDefinitionsServerTransport + trServiceTagInformationServer *ServiceTagInformationServerTransport + trServiceTagsServer *ServiceTagsServerTransport + trStaticMembersServer *StaticMembersServerTransport + trSubnetsServer *SubnetsServerTransport + 
trSubscriptionNetworkManagerConnectionsServer *SubscriptionNetworkManagerConnectionsServerTransport + trUsagesServer *UsagesServerTransport + trVPNConnectionsServer *VPNConnectionsServerTransport + trVPNGatewaysServer *VPNGatewaysServerTransport + trVPNLinkConnectionsServer *VPNLinkConnectionsServerTransport + trVPNServerConfigurationsAssociatedWithVirtualWanServer *VPNServerConfigurationsAssociatedWithVirtualWanServerTransport + trVPNServerConfigurationsServer *VPNServerConfigurationsServerTransport + trVPNSiteLinkConnectionsServer *VPNSiteLinkConnectionsServerTransport + trVPNSiteLinksServer *VPNSiteLinksServerTransport + trVPNSitesServer *VPNSitesServerTransport + trVPNSitesConfigurationServer *VPNSitesConfigurationServerTransport + trVipSwapServer *VipSwapServerTransport + trVirtualApplianceConnectionsServer *VirtualApplianceConnectionsServerTransport + trVirtualApplianceSKUsServer *VirtualApplianceSKUsServerTransport + trVirtualApplianceSitesServer *VirtualApplianceSitesServerTransport + trVirtualAppliancesServer *VirtualAppliancesServerTransport + trVirtualHubBgpConnectionServer *VirtualHubBgpConnectionServerTransport + trVirtualHubBgpConnectionsServer *VirtualHubBgpConnectionsServerTransport + trVirtualHubIPConfigurationServer *VirtualHubIPConfigurationServerTransport + trVirtualHubRouteTableV2SServer *VirtualHubRouteTableV2SServerTransport + trVirtualHubsServer *VirtualHubsServerTransport + trVirtualNetworkGatewayConnectionsServer *VirtualNetworkGatewayConnectionsServerTransport + trVirtualNetworkGatewayNatRulesServer *VirtualNetworkGatewayNatRulesServerTransport + trVirtualNetworkGatewaysServer *VirtualNetworkGatewaysServerTransport + trVirtualNetworkPeeringsServer *VirtualNetworkPeeringsServerTransport + trVirtualNetworkTapsServer *VirtualNetworkTapsServerTransport + trVirtualNetworksServer *VirtualNetworksServerTransport + trVirtualRouterPeeringsServer *VirtualRouterPeeringsServerTransport + trVirtualRoutersServer *VirtualRoutersServerTransport + trVirtualWansServer *VirtualWansServerTransport + trWatchersServer *WatchersServerTransport + trWebApplicationFirewallPoliciesServer *WebApplicationFirewallPoliciesServerTransport + trWebCategoriesServer *WebCategoriesServerTransport +} + +// Do implements the policy.Transporter interface for ServerFactoryTransport. 
+func (s *ServerFactoryTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + client := method[:strings.Index(method, ".")] + var resp *http.Response + var err error + + switch client { + case "AdminRuleCollectionsClient": + initServer(s, &s.trAdminRuleCollectionsServer, func() *AdminRuleCollectionsServerTransport { + return NewAdminRuleCollectionsServerTransport(&s.srv.AdminRuleCollectionsServer) + }) + resp, err = s.trAdminRuleCollectionsServer.Do(req) + case "AdminRulesClient": + initServer(s, &s.trAdminRulesServer, func() *AdminRulesServerTransport { return NewAdminRulesServerTransport(&s.srv.AdminRulesServer) }) + resp, err = s.trAdminRulesServer.Do(req) + case "ApplicationGatewayPrivateEndpointConnectionsClient": + initServer(s, &s.trApplicationGatewayPrivateEndpointConnectionsServer, func() *ApplicationGatewayPrivateEndpointConnectionsServerTransport { + return NewApplicationGatewayPrivateEndpointConnectionsServerTransport(&s.srv.ApplicationGatewayPrivateEndpointConnectionsServer) + }) + resp, err = s.trApplicationGatewayPrivateEndpointConnectionsServer.Do(req) + case "ApplicationGatewayPrivateLinkResourcesClient": + initServer(s, &s.trApplicationGatewayPrivateLinkResourcesServer, func() *ApplicationGatewayPrivateLinkResourcesServerTransport { + return NewApplicationGatewayPrivateLinkResourcesServerTransport(&s.srv.ApplicationGatewayPrivateLinkResourcesServer) + }) + resp, err = s.trApplicationGatewayPrivateLinkResourcesServer.Do(req) + case "ApplicationGatewayWafDynamicManifestsClient": + initServer(s, &s.trApplicationGatewayWafDynamicManifestsServer, func() *ApplicationGatewayWafDynamicManifestsServerTransport { + return NewApplicationGatewayWafDynamicManifestsServerTransport(&s.srv.ApplicationGatewayWafDynamicManifestsServer) + }) + resp, err = s.trApplicationGatewayWafDynamicManifestsServer.Do(req) + case "ApplicationGatewayWafDynamicManifestsDefaultClient": + initServer(s, &s.trApplicationGatewayWafDynamicManifestsDefaultServer, func() *ApplicationGatewayWafDynamicManifestsDefaultServerTransport { + return NewApplicationGatewayWafDynamicManifestsDefaultServerTransport(&s.srv.ApplicationGatewayWafDynamicManifestsDefaultServer) + }) + resp, err = s.trApplicationGatewayWafDynamicManifestsDefaultServer.Do(req) + case "ApplicationGatewaysClient": + initServer(s, &s.trApplicationGatewaysServer, func() *ApplicationGatewaysServerTransport { + return NewApplicationGatewaysServerTransport(&s.srv.ApplicationGatewaysServer) + }) + resp, err = s.trApplicationGatewaysServer.Do(req) + case "ApplicationSecurityGroupsClient": + initServer(s, &s.trApplicationSecurityGroupsServer, func() *ApplicationSecurityGroupsServerTransport { + return NewApplicationSecurityGroupsServerTransport(&s.srv.ApplicationSecurityGroupsServer) + }) + resp, err = s.trApplicationSecurityGroupsServer.Do(req) + case "AvailableDelegationsClient": + initServer(s, &s.trAvailableDelegationsServer, func() *AvailableDelegationsServerTransport { + return NewAvailableDelegationsServerTransport(&s.srv.AvailableDelegationsServer) + }) + resp, err = s.trAvailableDelegationsServer.Do(req) + case "AvailableEndpointServicesClient": + initServer(s, &s.trAvailableEndpointServicesServer, func() *AvailableEndpointServicesServerTransport { + return 
NewAvailableEndpointServicesServerTransport(&s.srv.AvailableEndpointServicesServer) + }) + resp, err = s.trAvailableEndpointServicesServer.Do(req) + case "AvailablePrivateEndpointTypesClient": + initServer(s, &s.trAvailablePrivateEndpointTypesServer, func() *AvailablePrivateEndpointTypesServerTransport { + return NewAvailablePrivateEndpointTypesServerTransport(&s.srv.AvailablePrivateEndpointTypesServer) + }) + resp, err = s.trAvailablePrivateEndpointTypesServer.Do(req) + case "AvailableResourceGroupDelegationsClient": + initServer(s, &s.trAvailableResourceGroupDelegationsServer, func() *AvailableResourceGroupDelegationsServerTransport { + return NewAvailableResourceGroupDelegationsServerTransport(&s.srv.AvailableResourceGroupDelegationsServer) + }) + resp, err = s.trAvailableResourceGroupDelegationsServer.Do(req) + case "AvailableServiceAliasesClient": + initServer(s, &s.trAvailableServiceAliasesServer, func() *AvailableServiceAliasesServerTransport { + return NewAvailableServiceAliasesServerTransport(&s.srv.AvailableServiceAliasesServer) + }) + resp, err = s.trAvailableServiceAliasesServer.Do(req) + case "AzureFirewallFqdnTagsClient": + initServer(s, &s.trAzureFirewallFqdnTagsServer, func() *AzureFirewallFqdnTagsServerTransport { + return NewAzureFirewallFqdnTagsServerTransport(&s.srv.AzureFirewallFqdnTagsServer) + }) + resp, err = s.trAzureFirewallFqdnTagsServer.Do(req) + case "AzureFirewallsClient": + initServer(s, &s.trAzureFirewallsServer, func() *AzureFirewallsServerTransport { + return NewAzureFirewallsServerTransport(&s.srv.AzureFirewallsServer) + }) + resp, err = s.trAzureFirewallsServer.Do(req) + case "BastionHostsClient": + initServer(s, &s.trBastionHostsServer, func() *BastionHostsServerTransport { return NewBastionHostsServerTransport(&s.srv.BastionHostsServer) }) + resp, err = s.trBastionHostsServer.Do(req) + case "BgpServiceCommunitiesClient": + initServer(s, &s.trBgpServiceCommunitiesServer, func() *BgpServiceCommunitiesServerTransport { + return NewBgpServiceCommunitiesServerTransport(&s.srv.BgpServiceCommunitiesServer) + }) + resp, err = s.trBgpServiceCommunitiesServer.Do(req) + case "ConfigurationPolicyGroupsClient": + initServer(s, &s.trConfigurationPolicyGroupsServer, func() *ConfigurationPolicyGroupsServerTransport { + return NewConfigurationPolicyGroupsServerTransport(&s.srv.ConfigurationPolicyGroupsServer) + }) + resp, err = s.trConfigurationPolicyGroupsServer.Do(req) + case "ConnectionMonitorsClient": + initServer(s, &s.trConnectionMonitorsServer, func() *ConnectionMonitorsServerTransport { + return NewConnectionMonitorsServerTransport(&s.srv.ConnectionMonitorsServer) + }) + resp, err = s.trConnectionMonitorsServer.Do(req) + case "ConnectivityConfigurationsClient": + initServer(s, &s.trConnectivityConfigurationsServer, func() *ConnectivityConfigurationsServerTransport { + return NewConnectivityConfigurationsServerTransport(&s.srv.ConnectivityConfigurationsServer) + }) + resp, err = s.trConnectivityConfigurationsServer.Do(req) + case "CustomIPPrefixesClient": + initServer(s, &s.trCustomIPPrefixesServer, func() *CustomIPPrefixesServerTransport { + return NewCustomIPPrefixesServerTransport(&s.srv.CustomIPPrefixesServer) + }) + resp, err = s.trCustomIPPrefixesServer.Do(req) + case "DdosCustomPoliciesClient": + initServer(s, &s.trDdosCustomPoliciesServer, func() *DdosCustomPoliciesServerTransport { + return NewDdosCustomPoliciesServerTransport(&s.srv.DdosCustomPoliciesServer) + }) + resp, err = s.trDdosCustomPoliciesServer.Do(req) + case "DdosProtectionPlansClient": + 
initServer(s, &s.trDdosProtectionPlansServer, func() *DdosProtectionPlansServerTransport { + return NewDdosProtectionPlansServerTransport(&s.srv.DdosProtectionPlansServer) + }) + resp, err = s.trDdosProtectionPlansServer.Do(req) + case "DefaultSecurityRulesClient": + initServer(s, &s.trDefaultSecurityRulesServer, func() *DefaultSecurityRulesServerTransport { + return NewDefaultSecurityRulesServerTransport(&s.srv.DefaultSecurityRulesServer) + }) + resp, err = s.trDefaultSecurityRulesServer.Do(req) + case "DscpConfigurationClient": + initServer(s, &s.trDscpConfigurationServer, func() *DscpConfigurationServerTransport { + return NewDscpConfigurationServerTransport(&s.srv.DscpConfigurationServer) + }) + resp, err = s.trDscpConfigurationServer.Do(req) + case "ExpressRouteCircuitAuthorizationsClient": + initServer(s, &s.trExpressRouteCircuitAuthorizationsServer, func() *ExpressRouteCircuitAuthorizationsServerTransport { + return NewExpressRouteCircuitAuthorizationsServerTransport(&s.srv.ExpressRouteCircuitAuthorizationsServer) + }) + resp, err = s.trExpressRouteCircuitAuthorizationsServer.Do(req) + case "ExpressRouteCircuitConnectionsClient": + initServer(s, &s.trExpressRouteCircuitConnectionsServer, func() *ExpressRouteCircuitConnectionsServerTransport { + return NewExpressRouteCircuitConnectionsServerTransport(&s.srv.ExpressRouteCircuitConnectionsServer) + }) + resp, err = s.trExpressRouteCircuitConnectionsServer.Do(req) + case "ExpressRouteCircuitPeeringsClient": + initServer(s, &s.trExpressRouteCircuitPeeringsServer, func() *ExpressRouteCircuitPeeringsServerTransport { + return NewExpressRouteCircuitPeeringsServerTransport(&s.srv.ExpressRouteCircuitPeeringsServer) + }) + resp, err = s.trExpressRouteCircuitPeeringsServer.Do(req) + case "ExpressRouteCircuitsClient": + initServer(s, &s.trExpressRouteCircuitsServer, func() *ExpressRouteCircuitsServerTransport { + return NewExpressRouteCircuitsServerTransport(&s.srv.ExpressRouteCircuitsServer) + }) + resp, err = s.trExpressRouteCircuitsServer.Do(req) + case "ExpressRouteConnectionsClient": + initServer(s, &s.trExpressRouteConnectionsServer, func() *ExpressRouteConnectionsServerTransport { + return NewExpressRouteConnectionsServerTransport(&s.srv.ExpressRouteConnectionsServer) + }) + resp, err = s.trExpressRouteConnectionsServer.Do(req) + case "ExpressRouteCrossConnectionPeeringsClient": + initServer(s, &s.trExpressRouteCrossConnectionPeeringsServer, func() *ExpressRouteCrossConnectionPeeringsServerTransport { + return NewExpressRouteCrossConnectionPeeringsServerTransport(&s.srv.ExpressRouteCrossConnectionPeeringsServer) + }) + resp, err = s.trExpressRouteCrossConnectionPeeringsServer.Do(req) + case "ExpressRouteCrossConnectionsClient": + initServer(s, &s.trExpressRouteCrossConnectionsServer, func() *ExpressRouteCrossConnectionsServerTransport { + return NewExpressRouteCrossConnectionsServerTransport(&s.srv.ExpressRouteCrossConnectionsServer) + }) + resp, err = s.trExpressRouteCrossConnectionsServer.Do(req) + case "ExpressRouteGatewaysClient": + initServer(s, &s.trExpressRouteGatewaysServer, func() *ExpressRouteGatewaysServerTransport { + return NewExpressRouteGatewaysServerTransport(&s.srv.ExpressRouteGatewaysServer) + }) + resp, err = s.trExpressRouteGatewaysServer.Do(req) + case "ExpressRouteLinksClient": + initServer(s, &s.trExpressRouteLinksServer, func() *ExpressRouteLinksServerTransport { + return NewExpressRouteLinksServerTransport(&s.srv.ExpressRouteLinksServer) + }) + resp, err = s.trExpressRouteLinksServer.Do(req) + case 
"ExpressRoutePortAuthorizationsClient": + initServer(s, &s.trExpressRoutePortAuthorizationsServer, func() *ExpressRoutePortAuthorizationsServerTransport { + return NewExpressRoutePortAuthorizationsServerTransport(&s.srv.ExpressRoutePortAuthorizationsServer) + }) + resp, err = s.trExpressRoutePortAuthorizationsServer.Do(req) + case "ExpressRoutePortsClient": + initServer(s, &s.trExpressRoutePortsServer, func() *ExpressRoutePortsServerTransport { + return NewExpressRoutePortsServerTransport(&s.srv.ExpressRoutePortsServer) + }) + resp, err = s.trExpressRoutePortsServer.Do(req) + case "ExpressRoutePortsLocationsClient": + initServer(s, &s.trExpressRoutePortsLocationsServer, func() *ExpressRoutePortsLocationsServerTransport { + return NewExpressRoutePortsLocationsServerTransport(&s.srv.ExpressRoutePortsLocationsServer) + }) + resp, err = s.trExpressRoutePortsLocationsServer.Do(req) + case "ExpressRouteProviderPortsLocationClient": + initServer(s, &s.trExpressRouteProviderPortsLocationServer, func() *ExpressRouteProviderPortsLocationServerTransport { + return NewExpressRouteProviderPortsLocationServerTransport(&s.srv.ExpressRouteProviderPortsLocationServer) + }) + resp, err = s.trExpressRouteProviderPortsLocationServer.Do(req) + case "ExpressRouteServiceProvidersClient": + initServer(s, &s.trExpressRouteServiceProvidersServer, func() *ExpressRouteServiceProvidersServerTransport { + return NewExpressRouteServiceProvidersServerTransport(&s.srv.ExpressRouteServiceProvidersServer) + }) + resp, err = s.trExpressRouteServiceProvidersServer.Do(req) + case "FirewallPoliciesClient": + initServer(s, &s.trFirewallPoliciesServer, func() *FirewallPoliciesServerTransport { + return NewFirewallPoliciesServerTransport(&s.srv.FirewallPoliciesServer) + }) + resp, err = s.trFirewallPoliciesServer.Do(req) + case "FirewallPolicyIdpsSignaturesClient": + initServer(s, &s.trFirewallPolicyIdpsSignaturesServer, func() *FirewallPolicyIdpsSignaturesServerTransport { + return NewFirewallPolicyIdpsSignaturesServerTransport(&s.srv.FirewallPolicyIdpsSignaturesServer) + }) + resp, err = s.trFirewallPolicyIdpsSignaturesServer.Do(req) + case "FirewallPolicyIdpsSignaturesFilterValuesClient": + initServer(s, &s.trFirewallPolicyIdpsSignaturesFilterValuesServer, func() *FirewallPolicyIdpsSignaturesFilterValuesServerTransport { + return NewFirewallPolicyIdpsSignaturesFilterValuesServerTransport(&s.srv.FirewallPolicyIdpsSignaturesFilterValuesServer) + }) + resp, err = s.trFirewallPolicyIdpsSignaturesFilterValuesServer.Do(req) + case "FirewallPolicyIdpsSignaturesOverridesClient": + initServer(s, &s.trFirewallPolicyIdpsSignaturesOverridesServer, func() *FirewallPolicyIdpsSignaturesOverridesServerTransport { + return NewFirewallPolicyIdpsSignaturesOverridesServerTransport(&s.srv.FirewallPolicyIdpsSignaturesOverridesServer) + }) + resp, err = s.trFirewallPolicyIdpsSignaturesOverridesServer.Do(req) + case "FirewallPolicyRuleCollectionGroupsClient": + initServer(s, &s.trFirewallPolicyRuleCollectionGroupsServer, func() *FirewallPolicyRuleCollectionGroupsServerTransport { + return NewFirewallPolicyRuleCollectionGroupsServerTransport(&s.srv.FirewallPolicyRuleCollectionGroupsServer) + }) + resp, err = s.trFirewallPolicyRuleCollectionGroupsServer.Do(req) + case "FlowLogsClient": + initServer(s, &s.trFlowLogsServer, func() *FlowLogsServerTransport { return NewFlowLogsServerTransport(&s.srv.FlowLogsServer) }) + resp, err = s.trFlowLogsServer.Do(req) + case "GroupsClient": + initServer(s, &s.trGroupsServer, func() *GroupsServerTransport { return 
NewGroupsServerTransport(&s.srv.GroupsServer) }) + resp, err = s.trGroupsServer.Do(req) + case "HubRouteTablesClient": + initServer(s, &s.trHubRouteTablesServer, func() *HubRouteTablesServerTransport { + return NewHubRouteTablesServerTransport(&s.srv.HubRouteTablesServer) + }) + resp, err = s.trHubRouteTablesServer.Do(req) + case "HubVirtualNetworkConnectionsClient": + initServer(s, &s.trHubVirtualNetworkConnectionsServer, func() *HubVirtualNetworkConnectionsServerTransport { + return NewHubVirtualNetworkConnectionsServerTransport(&s.srv.HubVirtualNetworkConnectionsServer) + }) + resp, err = s.trHubVirtualNetworkConnectionsServer.Do(req) + case "IPAllocationsClient": + initServer(s, &s.trIPAllocationsServer, func() *IPAllocationsServerTransport { + return NewIPAllocationsServerTransport(&s.srv.IPAllocationsServer) + }) + resp, err = s.trIPAllocationsServer.Do(req) + case "IPGroupsClient": + initServer(s, &s.trIPGroupsServer, func() *IPGroupsServerTransport { return NewIPGroupsServerTransport(&s.srv.IPGroupsServer) }) + resp, err = s.trIPGroupsServer.Do(req) + case "InboundNatRulesClient": + initServer(s, &s.trInboundNatRulesServer, func() *InboundNatRulesServerTransport { + return NewInboundNatRulesServerTransport(&s.srv.InboundNatRulesServer) + }) + resp, err = s.trInboundNatRulesServer.Do(req) + case "InboundSecurityRuleClient": + initServer(s, &s.trInboundSecurityRuleServer, func() *InboundSecurityRuleServerTransport { + return NewInboundSecurityRuleServerTransport(&s.srv.InboundSecurityRuleServer) + }) + resp, err = s.trInboundSecurityRuleServer.Do(req) + case "InterfaceIPConfigurationsClient": + initServer(s, &s.trInterfaceIPConfigurationsServer, func() *InterfaceIPConfigurationsServerTransport { + return NewInterfaceIPConfigurationsServerTransport(&s.srv.InterfaceIPConfigurationsServer) + }) + resp, err = s.trInterfaceIPConfigurationsServer.Do(req) + case "InterfaceLoadBalancersClient": + initServer(s, &s.trInterfaceLoadBalancersServer, func() *InterfaceLoadBalancersServerTransport { + return NewInterfaceLoadBalancersServerTransport(&s.srv.InterfaceLoadBalancersServer) + }) + resp, err = s.trInterfaceLoadBalancersServer.Do(req) + case "InterfaceTapConfigurationsClient": + initServer(s, &s.trInterfaceTapConfigurationsServer, func() *InterfaceTapConfigurationsServerTransport { + return NewInterfaceTapConfigurationsServerTransport(&s.srv.InterfaceTapConfigurationsServer) + }) + resp, err = s.trInterfaceTapConfigurationsServer.Do(req) + case "InterfacesClient": + initServer(s, &s.trInterfacesServer, func() *InterfacesServerTransport { return NewInterfacesServerTransport(&s.srv.InterfacesServer) }) + resp, err = s.trInterfacesServer.Do(req) + case "LoadBalancerBackendAddressPoolsClient": + initServer(s, &s.trLoadBalancerBackendAddressPoolsServer, func() *LoadBalancerBackendAddressPoolsServerTransport { + return NewLoadBalancerBackendAddressPoolsServerTransport(&s.srv.LoadBalancerBackendAddressPoolsServer) + }) + resp, err = s.trLoadBalancerBackendAddressPoolsServer.Do(req) + case "LoadBalancerFrontendIPConfigurationsClient": + initServer(s, &s.trLoadBalancerFrontendIPConfigurationsServer, func() *LoadBalancerFrontendIPConfigurationsServerTransport { + return NewLoadBalancerFrontendIPConfigurationsServerTransport(&s.srv.LoadBalancerFrontendIPConfigurationsServer) + }) + resp, err = s.trLoadBalancerFrontendIPConfigurationsServer.Do(req) + case "LoadBalancerLoadBalancingRulesClient": + initServer(s, &s.trLoadBalancerLoadBalancingRulesServer, func() 
*LoadBalancerLoadBalancingRulesServerTransport { + return NewLoadBalancerLoadBalancingRulesServerTransport(&s.srv.LoadBalancerLoadBalancingRulesServer) + }) + resp, err = s.trLoadBalancerLoadBalancingRulesServer.Do(req) + case "LoadBalancerNetworkInterfacesClient": + initServer(s, &s.trLoadBalancerNetworkInterfacesServer, func() *LoadBalancerNetworkInterfacesServerTransport { + return NewLoadBalancerNetworkInterfacesServerTransport(&s.srv.LoadBalancerNetworkInterfacesServer) + }) + resp, err = s.trLoadBalancerNetworkInterfacesServer.Do(req) + case "LoadBalancerOutboundRulesClient": + initServer(s, &s.trLoadBalancerOutboundRulesServer, func() *LoadBalancerOutboundRulesServerTransport { + return NewLoadBalancerOutboundRulesServerTransport(&s.srv.LoadBalancerOutboundRulesServer) + }) + resp, err = s.trLoadBalancerOutboundRulesServer.Do(req) + case "LoadBalancerProbesClient": + initServer(s, &s.trLoadBalancerProbesServer, func() *LoadBalancerProbesServerTransport { + return NewLoadBalancerProbesServerTransport(&s.srv.LoadBalancerProbesServer) + }) + resp, err = s.trLoadBalancerProbesServer.Do(req) + case "LoadBalancersClient": + initServer(s, &s.trLoadBalancersServer, func() *LoadBalancersServerTransport { + return NewLoadBalancersServerTransport(&s.srv.LoadBalancersServer) + }) + resp, err = s.trLoadBalancersServer.Do(req) + case "LocalNetworkGatewaysClient": + initServer(s, &s.trLocalNetworkGatewaysServer, func() *LocalNetworkGatewaysServerTransport { + return NewLocalNetworkGatewaysServerTransport(&s.srv.LocalNetworkGatewaysServer) + }) + resp, err = s.trLocalNetworkGatewaysServer.Do(req) + case "ManagementClient": + initServer(s, &s.trManagementServer, func() *ManagementServerTransport { return NewManagementServerTransport(&s.srv.ManagementServer) }) + resp, err = s.trManagementServer.Do(req) + case "ManagementGroupNetworkManagerConnectionsClient": + initServer(s, &s.trManagementGroupNetworkManagerConnectionsServer, func() *ManagementGroupNetworkManagerConnectionsServerTransport { + return NewManagementGroupNetworkManagerConnectionsServerTransport(&s.srv.ManagementGroupNetworkManagerConnectionsServer) + }) + resp, err = s.trManagementGroupNetworkManagerConnectionsServer.Do(req) + case "ManagerCommitsClient": + initServer(s, &s.trManagerCommitsServer, func() *ManagerCommitsServerTransport { + return NewManagerCommitsServerTransport(&s.srv.ManagerCommitsServer) + }) + resp, err = s.trManagerCommitsServer.Do(req) + case "ManagerDeploymentStatusClient": + initServer(s, &s.trManagerDeploymentStatusServer, func() *ManagerDeploymentStatusServerTransport { + return NewManagerDeploymentStatusServerTransport(&s.srv.ManagerDeploymentStatusServer) + }) + resp, err = s.trManagerDeploymentStatusServer.Do(req) + case "ManagersClient": + initServer(s, &s.trManagersServer, func() *ManagersServerTransport { return NewManagersServerTransport(&s.srv.ManagersServer) }) + resp, err = s.trManagersServer.Do(req) + case "NatGatewaysClient": + initServer(s, &s.trNatGatewaysServer, func() *NatGatewaysServerTransport { return NewNatGatewaysServerTransport(&s.srv.NatGatewaysServer) }) + resp, err = s.trNatGatewaysServer.Do(req) + case "NatRulesClient": + initServer(s, &s.trNatRulesServer, func() *NatRulesServerTransport { return NewNatRulesServerTransport(&s.srv.NatRulesServer) }) + resp, err = s.trNatRulesServer.Do(req) + case "OperationsClient": + initServer(s, &s.trOperationsServer, func() *OperationsServerTransport { return NewOperationsServerTransport(&s.srv.OperationsServer) }) + resp, err = 
s.trOperationsServer.Do(req) + case "P2SVPNGatewaysClient": + initServer(s, &s.trP2SVPNGatewaysServer, func() *P2SVPNGatewaysServerTransport { + return NewP2SVPNGatewaysServerTransport(&s.srv.P2SVPNGatewaysServer) + }) + resp, err = s.trP2SVPNGatewaysServer.Do(req) + case "PacketCapturesClient": + initServer(s, &s.trPacketCapturesServer, func() *PacketCapturesServerTransport { + return NewPacketCapturesServerTransport(&s.srv.PacketCapturesServer) + }) + resp, err = s.trPacketCapturesServer.Do(req) + case "PeerExpressRouteCircuitConnectionsClient": + initServer(s, &s.trPeerExpressRouteCircuitConnectionsServer, func() *PeerExpressRouteCircuitConnectionsServerTransport { + return NewPeerExpressRouteCircuitConnectionsServerTransport(&s.srv.PeerExpressRouteCircuitConnectionsServer) + }) + resp, err = s.trPeerExpressRouteCircuitConnectionsServer.Do(req) + case "PrivateDNSZoneGroupsClient": + initServer(s, &s.trPrivateDNSZoneGroupsServer, func() *PrivateDNSZoneGroupsServerTransport { + return NewPrivateDNSZoneGroupsServerTransport(&s.srv.PrivateDNSZoneGroupsServer) + }) + resp, err = s.trPrivateDNSZoneGroupsServer.Do(req) + case "PrivateEndpointsClient": + initServer(s, &s.trPrivateEndpointsServer, func() *PrivateEndpointsServerTransport { + return NewPrivateEndpointsServerTransport(&s.srv.PrivateEndpointsServer) + }) + resp, err = s.trPrivateEndpointsServer.Do(req) + case "PrivateLinkServicesClient": + initServer(s, &s.trPrivateLinkServicesServer, func() *PrivateLinkServicesServerTransport { + return NewPrivateLinkServicesServerTransport(&s.srv.PrivateLinkServicesServer) + }) + resp, err = s.trPrivateLinkServicesServer.Do(req) + case "ProfilesClient": + initServer(s, &s.trProfilesServer, func() *ProfilesServerTransport { return NewProfilesServerTransport(&s.srv.ProfilesServer) }) + resp, err = s.trProfilesServer.Do(req) + case "PublicIPAddressesClient": + initServer(s, &s.trPublicIPAddressesServer, func() *PublicIPAddressesServerTransport { + return NewPublicIPAddressesServerTransport(&s.srv.PublicIPAddressesServer) + }) + resp, err = s.trPublicIPAddressesServer.Do(req) + case "PublicIPPrefixesClient": + initServer(s, &s.trPublicIPPrefixesServer, func() *PublicIPPrefixesServerTransport { + return NewPublicIPPrefixesServerTransport(&s.srv.PublicIPPrefixesServer) + }) + resp, err = s.trPublicIPPrefixesServer.Do(req) + case "ResourceNavigationLinksClient": + initServer(s, &s.trResourceNavigationLinksServer, func() *ResourceNavigationLinksServerTransport { + return NewResourceNavigationLinksServerTransport(&s.srv.ResourceNavigationLinksServer) + }) + resp, err = s.trResourceNavigationLinksServer.Do(req) + case "RouteFilterRulesClient": + initServer(s, &s.trRouteFilterRulesServer, func() *RouteFilterRulesServerTransport { + return NewRouteFilterRulesServerTransport(&s.srv.RouteFilterRulesServer) + }) + resp, err = s.trRouteFilterRulesServer.Do(req) + case "RouteFiltersClient": + initServer(s, &s.trRouteFiltersServer, func() *RouteFiltersServerTransport { return NewRouteFiltersServerTransport(&s.srv.RouteFiltersServer) }) + resp, err = s.trRouteFiltersServer.Do(req) + case "RouteMapsClient": + initServer(s, &s.trRouteMapsServer, func() *RouteMapsServerTransport { return NewRouteMapsServerTransport(&s.srv.RouteMapsServer) }) + resp, err = s.trRouteMapsServer.Do(req) + case "RouteTablesClient": + initServer(s, &s.trRouteTablesServer, func() *RouteTablesServerTransport { return NewRouteTablesServerTransport(&s.srv.RouteTablesServer) }) + resp, err = s.trRouteTablesServer.Do(req) + case "RoutesClient": + 
initServer(s, &s.trRoutesServer, func() *RoutesServerTransport { return NewRoutesServerTransport(&s.srv.RoutesServer) }) + resp, err = s.trRoutesServer.Do(req) + case "RoutingIntentClient": + initServer(s, &s.trRoutingIntentServer, func() *RoutingIntentServerTransport { + return NewRoutingIntentServerTransport(&s.srv.RoutingIntentServer) + }) + resp, err = s.trRoutingIntentServer.Do(req) + case "ScopeConnectionsClient": + initServer(s, &s.trScopeConnectionsServer, func() *ScopeConnectionsServerTransport { + return NewScopeConnectionsServerTransport(&s.srv.ScopeConnectionsServer) + }) + resp, err = s.trScopeConnectionsServer.Do(req) + case "SecurityAdminConfigurationsClient": + initServer(s, &s.trSecurityAdminConfigurationsServer, func() *SecurityAdminConfigurationsServerTransport { + return NewSecurityAdminConfigurationsServerTransport(&s.srv.SecurityAdminConfigurationsServer) + }) + resp, err = s.trSecurityAdminConfigurationsServer.Do(req) + case "SecurityGroupsClient": + initServer(s, &s.trSecurityGroupsServer, func() *SecurityGroupsServerTransport { + return NewSecurityGroupsServerTransport(&s.srv.SecurityGroupsServer) + }) + resp, err = s.trSecurityGroupsServer.Do(req) + case "SecurityPartnerProvidersClient": + initServer(s, &s.trSecurityPartnerProvidersServer, func() *SecurityPartnerProvidersServerTransport { + return NewSecurityPartnerProvidersServerTransport(&s.srv.SecurityPartnerProvidersServer) + }) + resp, err = s.trSecurityPartnerProvidersServer.Do(req) + case "SecurityRulesClient": + initServer(s, &s.trSecurityRulesServer, func() *SecurityRulesServerTransport { + return NewSecurityRulesServerTransport(&s.srv.SecurityRulesServer) + }) + resp, err = s.trSecurityRulesServer.Do(req) + case "ServiceAssociationLinksClient": + initServer(s, &s.trServiceAssociationLinksServer, func() *ServiceAssociationLinksServerTransport { + return NewServiceAssociationLinksServerTransport(&s.srv.ServiceAssociationLinksServer) + }) + resp, err = s.trServiceAssociationLinksServer.Do(req) + case "ServiceEndpointPoliciesClient": + initServer(s, &s.trServiceEndpointPoliciesServer, func() *ServiceEndpointPoliciesServerTransport { + return NewServiceEndpointPoliciesServerTransport(&s.srv.ServiceEndpointPoliciesServer) + }) + resp, err = s.trServiceEndpointPoliciesServer.Do(req) + case "ServiceEndpointPolicyDefinitionsClient": + initServer(s, &s.trServiceEndpointPolicyDefinitionsServer, func() *ServiceEndpointPolicyDefinitionsServerTransport { + return NewServiceEndpointPolicyDefinitionsServerTransport(&s.srv.ServiceEndpointPolicyDefinitionsServer) + }) + resp, err = s.trServiceEndpointPolicyDefinitionsServer.Do(req) + case "ServiceTagInformationClient": + initServer(s, &s.trServiceTagInformationServer, func() *ServiceTagInformationServerTransport { + return NewServiceTagInformationServerTransport(&s.srv.ServiceTagInformationServer) + }) + resp, err = s.trServiceTagInformationServer.Do(req) + case "ServiceTagsClient": + initServer(s, &s.trServiceTagsServer, func() *ServiceTagsServerTransport { return NewServiceTagsServerTransport(&s.srv.ServiceTagsServer) }) + resp, err = s.trServiceTagsServer.Do(req) + case "StaticMembersClient": + initServer(s, &s.trStaticMembersServer, func() *StaticMembersServerTransport { + return NewStaticMembersServerTransport(&s.srv.StaticMembersServer) + }) + resp, err = s.trStaticMembersServer.Do(req) + case "SubnetsClient": + initServer(s, &s.trSubnetsServer, func() *SubnetsServerTransport { return NewSubnetsServerTransport(&s.srv.SubnetsServer) }) + resp, err = 
s.trSubnetsServer.Do(req) + case "SubscriptionNetworkManagerConnectionsClient": + initServer(s, &s.trSubscriptionNetworkManagerConnectionsServer, func() *SubscriptionNetworkManagerConnectionsServerTransport { + return NewSubscriptionNetworkManagerConnectionsServerTransport(&s.srv.SubscriptionNetworkManagerConnectionsServer) + }) + resp, err = s.trSubscriptionNetworkManagerConnectionsServer.Do(req) + case "UsagesClient": + initServer(s, &s.trUsagesServer, func() *UsagesServerTransport { return NewUsagesServerTransport(&s.srv.UsagesServer) }) + resp, err = s.trUsagesServer.Do(req) + case "VPNConnectionsClient": + initServer(s, &s.trVPNConnectionsServer, func() *VPNConnectionsServerTransport { + return NewVPNConnectionsServerTransport(&s.srv.VPNConnectionsServer) + }) + resp, err = s.trVPNConnectionsServer.Do(req) + case "VPNGatewaysClient": + initServer(s, &s.trVPNGatewaysServer, func() *VPNGatewaysServerTransport { return NewVPNGatewaysServerTransport(&s.srv.VPNGatewaysServer) }) + resp, err = s.trVPNGatewaysServer.Do(req) + case "VPNLinkConnectionsClient": + initServer(s, &s.trVPNLinkConnectionsServer, func() *VPNLinkConnectionsServerTransport { + return NewVPNLinkConnectionsServerTransport(&s.srv.VPNLinkConnectionsServer) + }) + resp, err = s.trVPNLinkConnectionsServer.Do(req) + case "VPNServerConfigurationsAssociatedWithVirtualWanClient": + initServer(s, &s.trVPNServerConfigurationsAssociatedWithVirtualWanServer, func() *VPNServerConfigurationsAssociatedWithVirtualWanServerTransport { + return NewVPNServerConfigurationsAssociatedWithVirtualWanServerTransport(&s.srv.VPNServerConfigurationsAssociatedWithVirtualWanServer) + }) + resp, err = s.trVPNServerConfigurationsAssociatedWithVirtualWanServer.Do(req) + case "VPNServerConfigurationsClient": + initServer(s, &s.trVPNServerConfigurationsServer, func() *VPNServerConfigurationsServerTransport { + return NewVPNServerConfigurationsServerTransport(&s.srv.VPNServerConfigurationsServer) + }) + resp, err = s.trVPNServerConfigurationsServer.Do(req) + case "VPNSiteLinkConnectionsClient": + initServer(s, &s.trVPNSiteLinkConnectionsServer, func() *VPNSiteLinkConnectionsServerTransport { + return NewVPNSiteLinkConnectionsServerTransport(&s.srv.VPNSiteLinkConnectionsServer) + }) + resp, err = s.trVPNSiteLinkConnectionsServer.Do(req) + case "VPNSiteLinksClient": + initServer(s, &s.trVPNSiteLinksServer, func() *VPNSiteLinksServerTransport { return NewVPNSiteLinksServerTransport(&s.srv.VPNSiteLinksServer) }) + resp, err = s.trVPNSiteLinksServer.Do(req) + case "VPNSitesClient": + initServer(s, &s.trVPNSitesServer, func() *VPNSitesServerTransport { return NewVPNSitesServerTransport(&s.srv.VPNSitesServer) }) + resp, err = s.trVPNSitesServer.Do(req) + case "VPNSitesConfigurationClient": + initServer(s, &s.trVPNSitesConfigurationServer, func() *VPNSitesConfigurationServerTransport { + return NewVPNSitesConfigurationServerTransport(&s.srv.VPNSitesConfigurationServer) + }) + resp, err = s.trVPNSitesConfigurationServer.Do(req) + case "VipSwapClient": + initServer(s, &s.trVipSwapServer, func() *VipSwapServerTransport { return NewVipSwapServerTransport(&s.srv.VipSwapServer) }) + resp, err = s.trVipSwapServer.Do(req) + case "VirtualApplianceConnectionsClient": + initServer(s, &s.trVirtualApplianceConnectionsServer, func() *VirtualApplianceConnectionsServerTransport { + return NewVirtualApplianceConnectionsServerTransport(&s.srv.VirtualApplianceConnectionsServer) + }) + resp, err = s.trVirtualApplianceConnectionsServer.Do(req) + case "VirtualApplianceSKUsClient": + 
initServer(s, &s.trVirtualApplianceSKUsServer, func() *VirtualApplianceSKUsServerTransport { + return NewVirtualApplianceSKUsServerTransport(&s.srv.VirtualApplianceSKUsServer) + }) + resp, err = s.trVirtualApplianceSKUsServer.Do(req) + case "VirtualApplianceSitesClient": + initServer(s, &s.trVirtualApplianceSitesServer, func() *VirtualApplianceSitesServerTransport { + return NewVirtualApplianceSitesServerTransport(&s.srv.VirtualApplianceSitesServer) + }) + resp, err = s.trVirtualApplianceSitesServer.Do(req) + case "VirtualAppliancesClient": + initServer(s, &s.trVirtualAppliancesServer, func() *VirtualAppliancesServerTransport { + return NewVirtualAppliancesServerTransport(&s.srv.VirtualAppliancesServer) + }) + resp, err = s.trVirtualAppliancesServer.Do(req) + case "VirtualHubBgpConnectionClient": + initServer(s, &s.trVirtualHubBgpConnectionServer, func() *VirtualHubBgpConnectionServerTransport { + return NewVirtualHubBgpConnectionServerTransport(&s.srv.VirtualHubBgpConnectionServer) + }) + resp, err = s.trVirtualHubBgpConnectionServer.Do(req) + case "VirtualHubBgpConnectionsClient": + initServer(s, &s.trVirtualHubBgpConnectionsServer, func() *VirtualHubBgpConnectionsServerTransport { + return NewVirtualHubBgpConnectionsServerTransport(&s.srv.VirtualHubBgpConnectionsServer) + }) + resp, err = s.trVirtualHubBgpConnectionsServer.Do(req) + case "VirtualHubIPConfigurationClient": + initServer(s, &s.trVirtualHubIPConfigurationServer, func() *VirtualHubIPConfigurationServerTransport { + return NewVirtualHubIPConfigurationServerTransport(&s.srv.VirtualHubIPConfigurationServer) + }) + resp, err = s.trVirtualHubIPConfigurationServer.Do(req) + case "VirtualHubRouteTableV2SClient": + initServer(s, &s.trVirtualHubRouteTableV2SServer, func() *VirtualHubRouteTableV2SServerTransport { + return NewVirtualHubRouteTableV2SServerTransport(&s.srv.VirtualHubRouteTableV2SServer) + }) + resp, err = s.trVirtualHubRouteTableV2SServer.Do(req) + case "VirtualHubsClient": + initServer(s, &s.trVirtualHubsServer, func() *VirtualHubsServerTransport { return NewVirtualHubsServerTransport(&s.srv.VirtualHubsServer) }) + resp, err = s.trVirtualHubsServer.Do(req) + case "VirtualNetworkGatewayConnectionsClient": + initServer(s, &s.trVirtualNetworkGatewayConnectionsServer, func() *VirtualNetworkGatewayConnectionsServerTransport { + return NewVirtualNetworkGatewayConnectionsServerTransport(&s.srv.VirtualNetworkGatewayConnectionsServer) + }) + resp, err = s.trVirtualNetworkGatewayConnectionsServer.Do(req) + case "VirtualNetworkGatewayNatRulesClient": + initServer(s, &s.trVirtualNetworkGatewayNatRulesServer, func() *VirtualNetworkGatewayNatRulesServerTransport { + return NewVirtualNetworkGatewayNatRulesServerTransport(&s.srv.VirtualNetworkGatewayNatRulesServer) + }) + resp, err = s.trVirtualNetworkGatewayNatRulesServer.Do(req) + case "VirtualNetworkGatewaysClient": + initServer(s, &s.trVirtualNetworkGatewaysServer, func() *VirtualNetworkGatewaysServerTransport { + return NewVirtualNetworkGatewaysServerTransport(&s.srv.VirtualNetworkGatewaysServer) + }) + resp, err = s.trVirtualNetworkGatewaysServer.Do(req) + case "VirtualNetworkPeeringsClient": + initServer(s, &s.trVirtualNetworkPeeringsServer, func() *VirtualNetworkPeeringsServerTransport { + return NewVirtualNetworkPeeringsServerTransport(&s.srv.VirtualNetworkPeeringsServer) + }) + resp, err = s.trVirtualNetworkPeeringsServer.Do(req) + case "VirtualNetworkTapsClient": + initServer(s, &s.trVirtualNetworkTapsServer, func() *VirtualNetworkTapsServerTransport { + return 
NewVirtualNetworkTapsServerTransport(&s.srv.VirtualNetworkTapsServer) + }) + resp, err = s.trVirtualNetworkTapsServer.Do(req) + case "VirtualNetworksClient": + initServer(s, &s.trVirtualNetworksServer, func() *VirtualNetworksServerTransport { + return NewVirtualNetworksServerTransport(&s.srv.VirtualNetworksServer) + }) + resp, err = s.trVirtualNetworksServer.Do(req) + case "VirtualRouterPeeringsClient": + initServer(s, &s.trVirtualRouterPeeringsServer, func() *VirtualRouterPeeringsServerTransport { + return NewVirtualRouterPeeringsServerTransport(&s.srv.VirtualRouterPeeringsServer) + }) + resp, err = s.trVirtualRouterPeeringsServer.Do(req) + case "VirtualRoutersClient": + initServer(s, &s.trVirtualRoutersServer, func() *VirtualRoutersServerTransport { + return NewVirtualRoutersServerTransport(&s.srv.VirtualRoutersServer) + }) + resp, err = s.trVirtualRoutersServer.Do(req) + case "VirtualWansClient": + initServer(s, &s.trVirtualWansServer, func() *VirtualWansServerTransport { return NewVirtualWansServerTransport(&s.srv.VirtualWansServer) }) + resp, err = s.trVirtualWansServer.Do(req) + case "WatchersClient": + initServer(s, &s.trWatchersServer, func() *WatchersServerTransport { return NewWatchersServerTransport(&s.srv.WatchersServer) }) + resp, err = s.trWatchersServer.Do(req) + case "WebApplicationFirewallPoliciesClient": + initServer(s, &s.trWebApplicationFirewallPoliciesServer, func() *WebApplicationFirewallPoliciesServerTransport { + return NewWebApplicationFirewallPoliciesServerTransport(&s.srv.WebApplicationFirewallPoliciesServer) + }) + resp, err = s.trWebApplicationFirewallPoliciesServer.Do(req) + case "WebCategoriesClient": + initServer(s, &s.trWebCategoriesServer, func() *WebCategoriesServerTransport { + return NewWebCategoriesServerTransport(&s.srv.WebCategoriesServer) + }) + resp, err = s.trWebCategoriesServer.Do(req) + default: + err = fmt.Errorf("unhandled client %s", client) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func initServer[T any](s *ServerFactoryTransport, dst **T, src func() *T) { + s.trMu.Lock() + if *dst == nil { + *dst = src() + } + s.trMu.Unlock() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/serviceassociationlinks_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/serviceassociationlinks_server.go new file mode 100644 index 00000000000..12b97981c57 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/serviceassociationlinks_server.go @@ -0,0 +1,104 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ServiceAssociationLinksServer is a fake server for instances of the armnetwork.ServiceAssociationLinksClient type. 
+type ServiceAssociationLinksServer struct { + // List is the fake for method ServiceAssociationLinksClient.List + // HTTP status codes to indicate success: http.StatusOK + List func(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, options *armnetwork.ServiceAssociationLinksClientListOptions) (resp azfake.Responder[armnetwork.ServiceAssociationLinksClientListResponse], errResp azfake.ErrorResponder) +} + +// NewServiceAssociationLinksServerTransport creates a new instance of ServiceAssociationLinksServerTransport with the provided implementation. +// The returned ServiceAssociationLinksServerTransport instance is connected to an instance of armnetwork.ServiceAssociationLinksClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewServiceAssociationLinksServerTransport(srv *ServiceAssociationLinksServer) *ServiceAssociationLinksServerTransport { + return &ServiceAssociationLinksServerTransport{srv: srv} +} + +// ServiceAssociationLinksServerTransport connects instances of armnetwork.ServiceAssociationLinksClient to instances of ServiceAssociationLinksServer. +// Don't use this type directly, use NewServiceAssociationLinksServerTransport instead. +type ServiceAssociationLinksServerTransport struct { + srv *ServiceAssociationLinksServer +} + +// Do implements the policy.Transporter interface for ServiceAssociationLinksServerTransport. +func (s *ServiceAssociationLinksServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ServiceAssociationLinksClient.List": + resp, err = s.dispatchList(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (s *ServiceAssociationLinksServerTransport) dispatchList(req *http.Request) (*http.Response, error) { + if s.srv.List == nil { + return nil, &nonRetriableError{errors.New("fake for method List not implemented")} + } + const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworks/(?P<virtualNetworkName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/subnets/(?P<subnetName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ServiceAssociationLinks` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkName")]) + if err != nil { + return nil, err + } + subnetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("subnetName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.List(req.Context(), resourceGroupNameParam, virtualNetworkNameParam, subnetNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ServiceAssociationLinksListResult, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/serviceendpointpolicies_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/serviceendpointpolicies_server.go new file mode 100644 index 00000000000..0e499bb1306 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/serviceendpointpolicies_server.go @@ -0,0 +1,352 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ServiceEndpointPoliciesServer is a fake server for instances of the armnetwork.ServiceEndpointPoliciesClient type. +type ServiceEndpointPoliciesServer struct { + // BeginCreateOrUpdate is the fake for method ServiceEndpointPoliciesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, parameters armnetwork.ServiceEndpointPolicy, options *armnetwork.ServiceEndpointPoliciesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.ServiceEndpointPoliciesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method ServiceEndpointPoliciesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, options *armnetwork.ServiceEndpointPoliciesClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.ServiceEndpointPoliciesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method ServiceEndpointPoliciesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, options *armnetwork.ServiceEndpointPoliciesClientGetOptions) (resp azfake.Responder[armnetwork.ServiceEndpointPoliciesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method ServiceEndpointPoliciesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armnetwork.ServiceEndpointPoliciesClientListOptions) (resp azfake.PagerResponder[armnetwork.ServiceEndpointPoliciesClientListResponse]) + + // NewListByResourceGroupPager is the fake for method ServiceEndpointPoliciesClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + 
NewListByResourceGroupPager func(resourceGroupName string, options *armnetwork.ServiceEndpointPoliciesClientListByResourceGroupOptions) (resp azfake.PagerResponder[armnetwork.ServiceEndpointPoliciesClientListByResourceGroupResponse]) + + // UpdateTags is the fake for method ServiceEndpointPoliciesClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, parameters armnetwork.TagsObject, options *armnetwork.ServiceEndpointPoliciesClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.ServiceEndpointPoliciesClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewServiceEndpointPoliciesServerTransport creates a new instance of ServiceEndpointPoliciesServerTransport with the provided implementation. +// The returned ServiceEndpointPoliciesServerTransport instance is connected to an instance of armnetwork.ServiceEndpointPoliciesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewServiceEndpointPoliciesServerTransport(srv *ServiceEndpointPoliciesServer) *ServiceEndpointPoliciesServerTransport { + return &ServiceEndpointPoliciesServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.ServiceEndpointPoliciesClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.ServiceEndpointPoliciesClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.ServiceEndpointPoliciesClientListResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armnetwork.ServiceEndpointPoliciesClientListByResourceGroupResponse]](), + } +} + +// ServiceEndpointPoliciesServerTransport connects instances of armnetwork.ServiceEndpointPoliciesClient to instances of ServiceEndpointPoliciesServer. +// Don't use this type directly, use NewServiceEndpointPoliciesServerTransport instead. +type ServiceEndpointPoliciesServerTransport struct { + srv *ServiceEndpointPoliciesServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.ServiceEndpointPoliciesClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.ServiceEndpointPoliciesClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.ServiceEndpointPoliciesClientListResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armnetwork.ServiceEndpointPoliciesClientListByResourceGroupResponse]] +} + +// Do implements the policy.Transporter interface for ServiceEndpointPoliciesServerTransport. 
+func (s *ServiceEndpointPoliciesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ServiceEndpointPoliciesClient.BeginCreateOrUpdate": + resp, err = s.dispatchBeginCreateOrUpdate(req) + case "ServiceEndpointPoliciesClient.BeginDelete": + resp, err = s.dispatchBeginDelete(req) + case "ServiceEndpointPoliciesClient.Get": + resp, err = s.dispatchGet(req) + case "ServiceEndpointPoliciesClient.NewListPager": + resp, err = s.dispatchNewListPager(req) + case "ServiceEndpointPoliciesClient.NewListByResourceGroupPager": + resp, err = s.dispatchNewListByResourceGroupPager(req) + case "ServiceEndpointPoliciesClient.UpdateTags": + resp, err = s.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (s *ServiceEndpointPoliciesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if s.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := s.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/serviceEndpointPolicies/(?P<serviceEndpointPolicyName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.ServiceEndpointPolicy](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + serviceEndpointPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("serviceEndpointPolicyName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, serviceEndpointPolicyNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + s.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + s.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + s.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (s *ServiceEndpointPoliciesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if s.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := s.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/serviceEndpointPolicies/(?P<serviceEndpointPolicyName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + serviceEndpointPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("serviceEndpointPolicyName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.BeginDelete(req.Context(), resourceGroupNameParam, serviceEndpointPolicyNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + s.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + s.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + s.beginDelete.remove(req) + } + + return resp, nil +} + +func (s *ServiceEndpointPoliciesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if s.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/serviceEndpointPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + serviceEndpointPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("serviceEndpointPolicyName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.ServiceEndpointPoliciesClientGetOptions + if expandParam != nil { + options = &armnetwork.ServiceEndpointPoliciesClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := s.srv.Get(req.Context(), resourceGroupNameParam, serviceEndpointPolicyNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ServiceEndpointPolicy, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *ServiceEndpointPoliciesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if s.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := s.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ServiceEndpointPolicies` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := s.srv.NewListPager(nil) + newListPager = &resp + s.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.ServiceEndpointPoliciesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + s.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + s.newListPager.remove(req) + } + return resp, nil +} + +func (s *ServiceEndpointPoliciesServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if s.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := s.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/serviceEndpointPolicies` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := s.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + s.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armnetwork.ServiceEndpointPoliciesClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + s.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + s.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (s *ServiceEndpointPoliciesServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if s.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/serviceEndpointPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + serviceEndpointPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("serviceEndpointPolicyName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.UpdateTags(req.Context(), resourceGroupNameParam, serviceEndpointPolicyNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ServiceEndpointPolicy, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/serviceendpointpolicydefinitions_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/serviceendpointpolicydefinitions_server.go new file mode 100644 index 00000000000..3544f2aa369 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/serviceendpointpolicydefinitions_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ServiceEndpointPolicyDefinitionsServer is a fake server for instances of the armnetwork.ServiceEndpointPolicyDefinitionsClient type. +type ServiceEndpointPolicyDefinitionsServer struct { + // BeginCreateOrUpdate is the fake for method ServiceEndpointPolicyDefinitionsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, serviceEndpointPolicyDefinitionName string, serviceEndpointPolicyDefinitions armnetwork.ServiceEndpointPolicyDefinition, options *armnetwork.ServiceEndpointPolicyDefinitionsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.ServiceEndpointPolicyDefinitionsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method ServiceEndpointPolicyDefinitionsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, serviceEndpointPolicyDefinitionName string, options *armnetwork.ServiceEndpointPolicyDefinitionsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.ServiceEndpointPolicyDefinitionsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method ServiceEndpointPolicyDefinitionsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, serviceEndpointPolicyDefinitionName string, options *armnetwork.ServiceEndpointPolicyDefinitionsClientGetOptions) (resp azfake.Responder[armnetwork.ServiceEndpointPolicyDefinitionsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListByResourceGroupPager is the fake for method ServiceEndpointPolicyDefinitionsClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager 
func(resourceGroupName string, serviceEndpointPolicyName string, options *armnetwork.ServiceEndpointPolicyDefinitionsClientListByResourceGroupOptions) (resp azfake.PagerResponder[armnetwork.ServiceEndpointPolicyDefinitionsClientListByResourceGroupResponse]) +} + +// NewServiceEndpointPolicyDefinitionsServerTransport creates a new instance of ServiceEndpointPolicyDefinitionsServerTransport with the provided implementation. +// The returned ServiceEndpointPolicyDefinitionsServerTransport instance is connected to an instance of armnetwork.ServiceEndpointPolicyDefinitionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewServiceEndpointPolicyDefinitionsServerTransport(srv *ServiceEndpointPolicyDefinitionsServer) *ServiceEndpointPolicyDefinitionsServerTransport { + return &ServiceEndpointPolicyDefinitionsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.ServiceEndpointPolicyDefinitionsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.ServiceEndpointPolicyDefinitionsClientDeleteResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armnetwork.ServiceEndpointPolicyDefinitionsClientListByResourceGroupResponse]](), + } +} + +// ServiceEndpointPolicyDefinitionsServerTransport connects instances of armnetwork.ServiceEndpointPolicyDefinitionsClient to instances of ServiceEndpointPolicyDefinitionsServer. +// Don't use this type directly, use NewServiceEndpointPolicyDefinitionsServerTransport instead. +type ServiceEndpointPolicyDefinitionsServerTransport struct { + srv *ServiceEndpointPolicyDefinitionsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.ServiceEndpointPolicyDefinitionsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.ServiceEndpointPolicyDefinitionsClientDeleteResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armnetwork.ServiceEndpointPolicyDefinitionsClientListByResourceGroupResponse]] +} + +// Do implements the policy.Transporter interface for ServiceEndpointPolicyDefinitionsServerTransport. 
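In the same hedged spirit (illustrative names, client constructor assumed from the public armnetwork surface), the ErrorResponder returned alongside each fake method lets a test simulate ARM failures; the error surfaces on the client as an *azcore.ResponseError.

package main

import (
	"context"
	"errors"
	"fmt"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func main() {
	srv := fake.ServiceEndpointPolicyDefinitionsServer{
		Get: func(ctx context.Context, resourceGroupName, serviceEndpointPolicyName, serviceEndpointPolicyDefinitionName string, options *armnetwork.ServiceEndpointPolicyDefinitionsClientGetOptions) (resp azfake.Responder[armnetwork.ServiceEndpointPolicyDefinitionsClientGetResponse], errResp azfake.ErrorResponder) {
			// Simulate an ARM error instead of returning a success payload.
			errResp.SetResponseError(http.StatusNotFound, "ResourceNotFound")
			return
		},
	}

	client, err := armnetwork.NewServiceEndpointPolicyDefinitionsClient("00000000-0000-0000-0000-000000000000", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transport: fake.NewServiceEndpointPolicyDefinitionsServerTransport(&srv)},
	})
	if err != nil {
		log.Fatal(err)
	}

	_, err = client.Get(context.Background(), "example-rg", "example-policy", "example-definition", nil)
	var respErr *azcore.ResponseError
	if errors.As(err, &respErr) {
		fmt.Println(respErr.ErrorCode, respErr.StatusCode) // "ResourceNotFound 404"
	}
}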
+func (s *ServiceEndpointPolicyDefinitionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ServiceEndpointPolicyDefinitionsClient.BeginCreateOrUpdate": + resp, err = s.dispatchBeginCreateOrUpdate(req) + case "ServiceEndpointPolicyDefinitionsClient.BeginDelete": + resp, err = s.dispatchBeginDelete(req) + case "ServiceEndpointPolicyDefinitionsClient.Get": + resp, err = s.dispatchGet(req) + case "ServiceEndpointPolicyDefinitionsClient.NewListByResourceGroupPager": + resp, err = s.dispatchNewListByResourceGroupPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (s *ServiceEndpointPolicyDefinitionsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if s.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := s.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/serviceEndpointPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/serviceEndpointPolicyDefinitions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.ServiceEndpointPolicyDefinition](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + serviceEndpointPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("serviceEndpointPolicyName")]) + if err != nil { + return nil, err + } + serviceEndpointPolicyDefinitionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("serviceEndpointPolicyDefinitionName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, serviceEndpointPolicyNameParam, serviceEndpointPolicyDefinitionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + s.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + s.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + s.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (s *ServiceEndpointPolicyDefinitionsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if s.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := s.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/serviceEndpointPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/serviceEndpointPolicyDefinitions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + serviceEndpointPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("serviceEndpointPolicyName")]) + if err != nil { + return nil, err + } + serviceEndpointPolicyDefinitionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("serviceEndpointPolicyDefinitionName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.BeginDelete(req.Context(), resourceGroupNameParam, serviceEndpointPolicyNameParam, serviceEndpointPolicyDefinitionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + s.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + s.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + s.beginDelete.remove(req) + } + + return resp, nil +} + +func (s *ServiceEndpointPolicyDefinitionsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if s.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/serviceEndpointPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/serviceEndpointPolicyDefinitions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + serviceEndpointPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("serviceEndpointPolicyName")]) + if err != nil { + return nil, err + } + serviceEndpointPolicyDefinitionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("serviceEndpointPolicyDefinitionName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.Get(req.Context(), resourceGroupNameParam, serviceEndpointPolicyNameParam, serviceEndpointPolicyDefinitionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ServiceEndpointPolicyDefinition, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *ServiceEndpointPolicyDefinitionsServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if s.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := s.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/serviceEndpointPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/serviceEndpointPolicyDefinitions` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + serviceEndpointPolicyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("serviceEndpointPolicyName")]) + if err != nil { + return nil, err + } + resp := s.srv.NewListByResourceGroupPager(resourceGroupNameParam, serviceEndpointPolicyNameParam, nil) + newListByResourceGroupPager = &resp + s.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armnetwork.ServiceEndpointPolicyDefinitionsClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + s.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + s.newListByResourceGroupPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/servicetaginformation_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/servicetaginformation_server.go new file mode 100644 index 00000000000..3caac119497 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/servicetaginformation_server.go @@ -0,0 +1,130 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" + "strconv" +) + +// ServiceTagInformationServer is a fake server for instances of the armnetwork.ServiceTagInformationClient type. +type ServiceTagInformationServer struct { + // NewListPager is the fake for method ServiceTagInformationClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(location string, options *armnetwork.ServiceTagInformationClientListOptions) (resp azfake.PagerResponder[armnetwork.ServiceTagInformationClientListResponse]) +} + +// NewServiceTagInformationServerTransport creates a new instance of ServiceTagInformationServerTransport with the provided implementation. +// The returned ServiceTagInformationServerTransport instance is connected to an instance of armnetwork.ServiceTagInformationClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewServiceTagInformationServerTransport(srv *ServiceTagInformationServer) *ServiceTagInformationServerTransport { + return &ServiceTagInformationServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.ServiceTagInformationClientListResponse]](), + } +} + +// ServiceTagInformationServerTransport connects instances of armnetwork.ServiceTagInformationClient to instances of ServiceTagInformationServer. +// Don't use this type directly, use NewServiceTagInformationServerTransport instead. +type ServiceTagInformationServerTransport struct { + srv *ServiceTagInformationServer + newListPager *tracker[azfake.PagerResponder[armnetwork.ServiceTagInformationClientListResponse]] +} + +// Do implements the policy.Transporter interface for ServiceTagInformationServerTransport. 
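A hedged sketch for the pager-based fake above: the PagerResponder queues pages and the transport injects the nextLink between them, so the client-side pager walks them as it would real pages. The location and the empty page payloads are placeholders; a real test would populate the page values, and the client constructor is assumed from the public armnetwork surface.

package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func main() {
	srv := fake.ServiceTagInformationServer{
		NewListPager: func(location string, options *armnetwork.ServiceTagInformationClientListOptions) (resp azfake.PagerResponder[armnetwork.ServiceTagInformationClientListResponse]) {
			// Queue two (empty) pages; the transport wires up the nextLink.
			resp.AddPage(http.StatusOK, armnetwork.ServiceTagInformationClientListResponse{}, nil)
			resp.AddPage(http.StatusOK, armnetwork.ServiceTagInformationClientListResponse{}, nil)
			return
		},
	}

	client, err := armnetwork.NewServiceTagInformationClient("00000000-0000-0000-0000-000000000000", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transport: fake.NewServiceTagInformationServerTransport(&srv)},
	})
	if err != nil {
		log.Fatal(err)
	}

	pages := 0
	pager := client.NewListPager("westus", nil)
	for pager.More() {
		if _, err := pager.NextPage(context.Background()); err != nil {
			log.Fatal(err)
		}
		pages++
	}
	fmt.Println(pages) // 2
}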
+func (s *ServiceTagInformationServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ServiceTagInformationClient.NewListPager": + resp, err = s.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (s *ServiceTagInformationServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if s.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := s.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/serviceTagDetails` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + noAddressPrefixesUnescaped, err := url.QueryUnescape(qp.Get("noAddressPrefixes")) + if err != nil { + return nil, err + } + noAddressPrefixesParam, err := parseOptional(noAddressPrefixesUnescaped, strconv.ParseBool) + if err != nil { + return nil, err + } + tagNameUnescaped, err := url.QueryUnescape(qp.Get("tagName")) + if err != nil { + return nil, err + } + tagNameParam := getOptional(tagNameUnescaped) + var options *armnetwork.ServiceTagInformationClientListOptions + if noAddressPrefixesParam != nil || tagNameParam != nil { + options = &armnetwork.ServiceTagInformationClientListOptions{ + NoAddressPrefixes: noAddressPrefixesParam, + TagName: tagNameParam, + } + } + resp := s.srv.NewListPager(locationParam, options) + newListPager = &resp + s.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.ServiceTagInformationClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + s.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + s.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/servicetags_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/servicetags_server.go new file mode 100644 index 00000000000..d01f788c5cd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/servicetags_server.go @@ -0,0 +1,96 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. 
+// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// ServiceTagsServer is a fake server for instances of the armnetwork.ServiceTagsClient type. +type ServiceTagsServer struct { + // List is the fake for method ServiceTagsClient.List + // HTTP status codes to indicate success: http.StatusOK + List func(ctx context.Context, location string, options *armnetwork.ServiceTagsClientListOptions) (resp azfake.Responder[armnetwork.ServiceTagsClientListResponse], errResp azfake.ErrorResponder) +} + +// NewServiceTagsServerTransport creates a new instance of ServiceTagsServerTransport with the provided implementation. +// The returned ServiceTagsServerTransport instance is connected to an instance of armnetwork.ServiceTagsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewServiceTagsServerTransport(srv *ServiceTagsServer) *ServiceTagsServerTransport { + return &ServiceTagsServerTransport{srv: srv} +} + +// ServiceTagsServerTransport connects instances of armnetwork.ServiceTagsClient to instances of ServiceTagsServer. +// Don't use this type directly, use NewServiceTagsServerTransport instead. +type ServiceTagsServerTransport struct { + srv *ServiceTagsServer +} + +// Do implements the policy.Transporter interface for ServiceTagsServerTransport. +func (s *ServiceTagsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ServiceTagsClient.List": + resp, err = s.dispatchList(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (s *ServiceTagsServerTransport) dispatchList(req *http.Request) (*http.Response, error) { + if s.srv.List == nil { + return nil, &nonRetriableError{errors.New("fake for method List not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/locations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/serviceTags` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.List(req.Context(), locationParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ServiceTagsListResult, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/staticmembers_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/staticmembers_server.go new file mode 100644 index 00000000000..d7bcf604530 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/staticmembers_server.go @@ -0,0 +1,290 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" + "strconv" +) + +// StaticMembersServer is a fake server for instances of the armnetwork.StaticMembersClient type. +type StaticMembersServer struct { + // CreateOrUpdate is the fake for method StaticMembersClient.CreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + CreateOrUpdate func(ctx context.Context, resourceGroupName string, networkManagerName string, networkGroupName string, staticMemberName string, parameters armnetwork.StaticMember, options *armnetwork.StaticMembersClientCreateOrUpdateOptions) (resp azfake.Responder[armnetwork.StaticMembersClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // Delete is the fake for method StaticMembersClient.Delete + // HTTP status codes to indicate success: http.StatusOK, http.StatusNoContent + Delete func(ctx context.Context, resourceGroupName string, networkManagerName string, networkGroupName string, staticMemberName string, options *armnetwork.StaticMembersClientDeleteOptions) (resp azfake.Responder[armnetwork.StaticMembersClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method StaticMembersClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, networkManagerName string, networkGroupName string, staticMemberName string, options *armnetwork.StaticMembersClientGetOptions) (resp azfake.Responder[armnetwork.StaticMembersClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method StaticMembersClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, networkManagerName string, networkGroupName string, options *armnetwork.StaticMembersClientListOptions) (resp azfake.PagerResponder[armnetwork.StaticMembersClientListResponse]) +} + +// NewStaticMembersServerTransport creates a new instance of StaticMembersServerTransport with the provided implementation. 
+// The returned StaticMembersServerTransport instance is connected to an instance of armnetwork.StaticMembersClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewStaticMembersServerTransport(srv *StaticMembersServer) *StaticMembersServerTransport { + return &StaticMembersServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.StaticMembersClientListResponse]](), + } +} + +// StaticMembersServerTransport connects instances of armnetwork.StaticMembersClient to instances of StaticMembersServer. +// Don't use this type directly, use NewStaticMembersServerTransport instead. +type StaticMembersServerTransport struct { + srv *StaticMembersServer + newListPager *tracker[azfake.PagerResponder[armnetwork.StaticMembersClientListResponse]] +} + +// Do implements the policy.Transporter interface for StaticMembersServerTransport. +func (s *StaticMembersServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "StaticMembersClient.CreateOrUpdate": + resp, err = s.dispatchCreateOrUpdate(req) + case "StaticMembersClient.Delete": + resp, err = s.dispatchDelete(req) + case "StaticMembersClient.Get": + resp, err = s.dispatchGet(req) + case "StaticMembersClient.NewListPager": + resp, err = s.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (s *StaticMembersServerTransport) dispatchCreateOrUpdate(req *http.Request) (*http.Response, error) { + if s.srv.CreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method CreateOrUpdate not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/staticMembers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.StaticMember](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + networkGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkGroupName")]) + if err != nil { + return nil, err + } + staticMemberNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("staticMemberName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.CreateOrUpdate(req.Context(), resourceGroupNameParam, networkManagerNameParam, networkGroupNameParam, staticMemberNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusCreated}, respContent.HTTPStatus) { + return nil, 
&nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).StaticMember, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *StaticMembersServerTransport) dispatchDelete(req *http.Request) (*http.Response, error) { + if s.srv.Delete == nil { + return nil, &nonRetriableError{errors.New("fake for method Delete not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/staticMembers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + networkGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkGroupName")]) + if err != nil { + return nil, err + } + staticMemberNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("staticMemberName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.Delete(req.Context(), resourceGroupNameParam, networkManagerNameParam, networkGroupNameParam, staticMemberNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusNoContent}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusNoContent", respContent.HTTPStatus)} + } + resp, err := server.NewResponse(respContent, req, nil) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *StaticMembersServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if s.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/staticMembers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + networkGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkGroupName")]) + if err != nil { + return nil, err + } + staticMemberNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("staticMemberName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.Get(req.Context(), resourceGroupNameParam, networkManagerNameParam, networkGroupNameParam, staticMemberNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).StaticMember, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *StaticMembersServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if s.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := s.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/staticMembers` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkManagerNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerName")]) + if err != nil { + return nil, err + } + networkGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkGroupName")]) + if err != nil { + return nil, err + } + topUnescaped, err := url.QueryUnescape(qp.Get("$top")) + if err != nil { + return nil, err + } + topParam, err := parseOptional(topUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + skipTokenUnescaped, err := url.QueryUnescape(qp.Get("$skipToken")) + if err != nil { + return nil, err + } + skipTokenParam := getOptional(skipTokenUnescaped) + var options *armnetwork.StaticMembersClientListOptions + if topParam != nil || skipTokenParam != nil { + options = &armnetwork.StaticMembersClientListOptions{ + Top: topParam, + SkipToken: skipTokenParam, + } + } + resp := s.srv.NewListPager(resourceGroupNameParam, networkManagerNameParam, networkGroupNameParam, options) + newListPager = &resp + s.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.StaticMembersClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + s.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + s.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/subnets_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/subnets_server.go new file mode 100644 index 00000000000..2f60254cbed --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/subnets_server.go @@ -0,0 +1,404 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// SubnetsServer is a fake server for instances of the armnetwork.SubnetsClient type. +type SubnetsServer struct { + // BeginCreateOrUpdate is the fake for method SubnetsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters armnetwork.Subnet, options *armnetwork.SubnetsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.SubnetsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method SubnetsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, options *armnetwork.SubnetsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.SubnetsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method SubnetsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, options *armnetwork.SubnetsClientGetOptions) (resp azfake.Responder[armnetwork.SubnetsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method SubnetsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, virtualNetworkName string, options *armnetwork.SubnetsClientListOptions) (resp azfake.PagerResponder[armnetwork.SubnetsClientListResponse]) + + // BeginPrepareNetworkPolicies is the fake for method SubnetsClient.BeginPrepareNetworkPolicies + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginPrepareNetworkPolicies func(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, prepareNetworkPoliciesRequestParameters armnetwork.PrepareNetworkPoliciesRequest, options *armnetwork.SubnetsClientBeginPrepareNetworkPoliciesOptions) (resp azfake.PollerResponder[armnetwork.SubnetsClientPrepareNetworkPoliciesResponse], errResp azfake.ErrorResponder) + + // BeginUnprepareNetworkPolicies is the fake for method SubnetsClient.BeginUnprepareNetworkPolicies + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginUnprepareNetworkPolicies func(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, unprepareNetworkPoliciesRequestParameters armnetwork.UnprepareNetworkPoliciesRequest, options *armnetwork.SubnetsClientBeginUnprepareNetworkPoliciesOptions) (resp azfake.PollerResponder[armnetwork.SubnetsClientUnprepareNetworkPoliciesResponse], errResp azfake.ErrorResponder) +} + +// NewSubnetsServerTransport creates a new 
instance of SubnetsServerTransport with the provided implementation. +// The returned SubnetsServerTransport instance is connected to an instance of armnetwork.SubnetsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewSubnetsServerTransport(srv *SubnetsServer) *SubnetsServerTransport { + return &SubnetsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.SubnetsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.SubnetsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.SubnetsClientListResponse]](), + beginPrepareNetworkPolicies: newTracker[azfake.PollerResponder[armnetwork.SubnetsClientPrepareNetworkPoliciesResponse]](), + beginUnprepareNetworkPolicies: newTracker[azfake.PollerResponder[armnetwork.SubnetsClientUnprepareNetworkPoliciesResponse]](), + } +} + +// SubnetsServerTransport connects instances of armnetwork.SubnetsClient to instances of SubnetsServer. +// Don't use this type directly, use NewSubnetsServerTransport instead. +type SubnetsServerTransport struct { + srv *SubnetsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.SubnetsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.SubnetsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.SubnetsClientListResponse]] + beginPrepareNetworkPolicies *tracker[azfake.PollerResponder[armnetwork.SubnetsClientPrepareNetworkPoliciesResponse]] + beginUnprepareNetworkPolicies *tracker[azfake.PollerResponder[armnetwork.SubnetsClientUnprepareNetworkPoliciesResponse]] +} + +// Do implements the policy.Transporter interface for SubnetsServerTransport. 
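And a hedged sketch for the long-running operations faked here: the PollerResponder queues intermediate polls plus a terminal payload, which the client consumes through BeginCreateOrUpdate and PollUntilDone. Resource names are placeholders and the client constructor is assumed from the public armnetwork surface.

package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func main() {
	srv := fake.SubnetsServer{
		BeginCreateOrUpdate: func(ctx context.Context, resourceGroupName, virtualNetworkName, subnetName string, subnetParameters armnetwork.Subnet, options *armnetwork.SubnetsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.SubnetsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) {
			// One intermediate poll, then the terminal payload.
			resp.AddNonTerminalResponse(http.StatusCreated, nil)
			resp.SetTerminalResponse(http.StatusOK, armnetwork.SubnetsClientCreateOrUpdateResponse{
				Subnet: armnetwork.Subnet{Name: to.Ptr(subnetName)},
			}, nil)
			return
		},
	}

	client, err := armnetwork.NewSubnetsClient("00000000-0000-0000-0000-000000000000", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transport: fake.NewSubnetsServerTransport(&srv)},
	})
	if err != nil {
		log.Fatal(err)
	}

	poller, err := client.BeginCreateOrUpdate(context.Background(), "example-rg", "example-vnet", "example-subnet", armnetwork.Subnet{}, nil)
	if err != nil {
		log.Fatal(err)
	}
	res, err := poller.PollUntilDone(context.Background(), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*res.Name) // prints "example-subnet"
}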
+func (s *SubnetsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "SubnetsClient.BeginCreateOrUpdate": + resp, err = s.dispatchBeginCreateOrUpdate(req) + case "SubnetsClient.BeginDelete": + resp, err = s.dispatchBeginDelete(req) + case "SubnetsClient.Get": + resp, err = s.dispatchGet(req) + case "SubnetsClient.NewListPager": + resp, err = s.dispatchNewListPager(req) + case "SubnetsClient.BeginPrepareNetworkPolicies": + resp, err = s.dispatchBeginPrepareNetworkPolicies(req) + case "SubnetsClient.BeginUnprepareNetworkPolicies": + resp, err = s.dispatchBeginUnprepareNetworkPolicies(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (s *SubnetsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if s.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := s.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/subnets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.Subnet](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkName")]) + if err != nil { + return nil, err + } + subnetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("subnetName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, virtualNetworkNameParam, subnetNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + s.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + s.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + s.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (s *SubnetsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if s.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := s.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworks/(?P<virtualNetworkName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/subnets/(?P<subnetName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkName")]) + if err != nil { + return nil, err + } + subnetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("subnetName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.BeginDelete(req.Context(), resourceGroupNameParam, virtualNetworkNameParam, subnetNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + s.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + s.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + s.beginDelete.remove(req) + } + + return resp, nil +} + +func (s *SubnetsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if s.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/subnets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkName")]) + if err != nil { + return nil, err + } + subnetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("subnetName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.SubnetsClientGetOptions + if expandParam != nil { + options = &armnetwork.SubnetsClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := s.srv.Get(req.Context(), resourceGroupNameParam, virtualNetworkNameParam, subnetNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Subnet, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *SubnetsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if s.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := s.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/subnets` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkName")]) + if err != nil { + return nil, err + } + resp := s.srv.NewListPager(resourceGroupNameParam, virtualNetworkNameParam, nil) + newListPager = &resp + s.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.SubnetsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + s.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + s.newListPager.remove(req) + } + return resp, nil +} + +func (s *SubnetsServerTransport) dispatchBeginPrepareNetworkPolicies(req *http.Request) (*http.Response, error) { + if s.srv.BeginPrepareNetworkPolicies == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginPrepareNetworkPolicies not implemented")} + } + beginPrepareNetworkPolicies := s.beginPrepareNetworkPolicies.get(req) + if beginPrepareNetworkPolicies == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/subnets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/PrepareNetworkPolicies` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.PrepareNetworkPoliciesRequest](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkName")]) + if err != nil { + return nil, err + } + subnetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("subnetName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.BeginPrepareNetworkPolicies(req.Context(), resourceGroupNameParam, virtualNetworkNameParam, subnetNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginPrepareNetworkPolicies = &respr + s.beginPrepareNetworkPolicies.add(req, beginPrepareNetworkPolicies) + } + + resp, err := server.PollerResponderNext(beginPrepareNetworkPolicies, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + s.beginPrepareNetworkPolicies.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginPrepareNetworkPolicies) { + s.beginPrepareNetworkPolicies.remove(req) + } + + return resp, nil +} + +func (s *SubnetsServerTransport) dispatchBeginUnprepareNetworkPolicies(req *http.Request) (*http.Response, error) { + if s.srv.BeginUnprepareNetworkPolicies == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUnprepareNetworkPolicies not implemented")} + } + beginUnprepareNetworkPolicies := s.beginUnprepareNetworkPolicies.get(req) + if beginUnprepareNetworkPolicies == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/subnets/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/UnprepareNetworkPolicies` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.UnprepareNetworkPoliciesRequest](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkName")]) + if err != nil { + return nil, err + } + subnetNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("subnetName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.BeginUnprepareNetworkPolicies(req.Context(), resourceGroupNameParam, virtualNetworkNameParam, subnetNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUnprepareNetworkPolicies = &respr + s.beginUnprepareNetworkPolicies.add(req, beginUnprepareNetworkPolicies) + } + + resp, err := server.PollerResponderNext(beginUnprepareNetworkPolicies, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + s.beginUnprepareNetworkPolicies.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUnprepareNetworkPolicies) { + s.beginUnprepareNetworkPolicies.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/subscriptionnetworkmanagerconnections_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/subscriptionnetworkmanagerconnections_server.go new file mode 100644 index 00000000000..2214616579c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/subscriptionnetworkmanagerconnections_server.go @@ -0,0 +1,242 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" + "strconv" +) + +// SubscriptionNetworkManagerConnectionsServer is a fake server for instances of the armnetwork.SubscriptionNetworkManagerConnectionsClient type. +type SubscriptionNetworkManagerConnectionsServer struct { + // CreateOrUpdate is the fake for method SubscriptionNetworkManagerConnectionsClient.CreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + CreateOrUpdate func(ctx context.Context, networkManagerConnectionName string, parameters armnetwork.ManagerConnection, options *armnetwork.SubscriptionNetworkManagerConnectionsClientCreateOrUpdateOptions) (resp azfake.Responder[armnetwork.SubscriptionNetworkManagerConnectionsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // Delete is the fake for method SubscriptionNetworkManagerConnectionsClient.Delete + // HTTP status codes to indicate success: http.StatusOK, http.StatusNoContent + Delete func(ctx context.Context, networkManagerConnectionName string, options *armnetwork.SubscriptionNetworkManagerConnectionsClientDeleteOptions) (resp azfake.Responder[armnetwork.SubscriptionNetworkManagerConnectionsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method SubscriptionNetworkManagerConnectionsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, networkManagerConnectionName string, options *armnetwork.SubscriptionNetworkManagerConnectionsClientGetOptions) (resp azfake.Responder[armnetwork.SubscriptionNetworkManagerConnectionsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method SubscriptionNetworkManagerConnectionsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armnetwork.SubscriptionNetworkManagerConnectionsClientListOptions) (resp azfake.PagerResponder[armnetwork.SubscriptionNetworkManagerConnectionsClientListResponse]) +} + +// NewSubscriptionNetworkManagerConnectionsServerTransport creates a new instance of SubscriptionNetworkManagerConnectionsServerTransport with the provided implementation. +// The returned SubscriptionNetworkManagerConnectionsServerTransport instance is connected to an instance of armnetwork.SubscriptionNetworkManagerConnectionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewSubscriptionNetworkManagerConnectionsServerTransport(srv *SubscriptionNetworkManagerConnectionsServer) *SubscriptionNetworkManagerConnectionsServerTransport { + return &SubscriptionNetworkManagerConnectionsServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.SubscriptionNetworkManagerConnectionsClientListResponse]](), + } +} + +// SubscriptionNetworkManagerConnectionsServerTransport connects instances of armnetwork.SubscriptionNetworkManagerConnectionsClient to instances of SubscriptionNetworkManagerConnectionsServer. +// Don't use this type directly, use NewSubscriptionNetworkManagerConnectionsServerTransport instead. 
+type SubscriptionNetworkManagerConnectionsServerTransport struct { + srv *SubscriptionNetworkManagerConnectionsServer + newListPager *tracker[azfake.PagerResponder[armnetwork.SubscriptionNetworkManagerConnectionsClientListResponse]] +} + +// Do implements the policy.Transporter interface for SubscriptionNetworkManagerConnectionsServerTransport. +func (s *SubscriptionNetworkManagerConnectionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "SubscriptionNetworkManagerConnectionsClient.CreateOrUpdate": + resp, err = s.dispatchCreateOrUpdate(req) + case "SubscriptionNetworkManagerConnectionsClient.Delete": + resp, err = s.dispatchDelete(req) + case "SubscriptionNetworkManagerConnectionsClient.Get": + resp, err = s.dispatchGet(req) + case "SubscriptionNetworkManagerConnectionsClient.NewListPager": + resp, err = s.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (s *SubscriptionNetworkManagerConnectionsServerTransport) dispatchCreateOrUpdate(req *http.Request) (*http.Response, error) { + if s.srv.CreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method CreateOrUpdate not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagerConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.ManagerConnection](req) + if err != nil { + return nil, err + } + networkManagerConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.CreateOrUpdate(req.Context(), networkManagerConnectionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusCreated}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ManagerConnection, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *SubscriptionNetworkManagerConnectionsServerTransport) dispatchDelete(req *http.Request) (*http.Response, error) { + if s.srv.Delete == nil { + return nil, &nonRetriableError{errors.New("fake for method Delete not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagerConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + networkManagerConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.Delete(req.Context(), networkManagerConnectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusNoContent}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusNoContent", respContent.HTTPStatus)} + } + resp, err := server.NewResponse(respContent, req, nil) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *SubscriptionNetworkManagerConnectionsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if s.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagerConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + networkManagerConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkManagerConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := s.srv.Get(req.Context(), networkManagerConnectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ManagerConnection, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (s *SubscriptionNetworkManagerConnectionsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if s.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := s.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkManagerConnections` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + topUnescaped, err := url.QueryUnescape(qp.Get("$top")) + if err != nil { + return nil, err + } + topParam, err := parseOptional(topUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + skipTokenUnescaped, err := url.QueryUnescape(qp.Get("$skipToken")) + if err != nil { + return nil, err + } + skipTokenParam := getOptional(skipTokenUnescaped) + var options *armnetwork.SubscriptionNetworkManagerConnectionsClientListOptions + if topParam != nil || skipTokenParam != nil { + options = &armnetwork.SubscriptionNetworkManagerConnectionsClientListOptions{ + Top: topParam, + SkipToken: skipTokenParam, + } + } + resp := s.srv.NewListPager(options) + newListPager = &resp + s.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.SubscriptionNetworkManagerConnectionsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + s.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + s.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/time_rfc3339.go new file mode 100644 index 00000000000..b0535a7b63e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/time_rfc3339.go @@ -0,0 +1,86 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "encoding/json" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "reflect" + "regexp" + "strings" + "time" +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. 
+var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) + +const ( + utcDateTimeJSON = `"2006-01-02T15:04:05.999999999"` + utcDateTime = "2006-01-02T15:04:05.999999999" + dateTimeJSON = `"` + time.RFC3339Nano + `"` +) + +type dateTimeRFC3339 time.Time + +func (t dateTimeRFC3339) MarshalJSON() ([]byte, error) { + tt := time.Time(t) + return tt.MarshalJSON() +} + +func (t dateTimeRFC3339) MarshalText() ([]byte, error) { + tt := time.Time(t) + return tt.MarshalText() +} + +func (t *dateTimeRFC3339) UnmarshalJSON(data []byte) error { + layout := utcDateTimeJSON + if tzOffsetRegex.Match(data) { + layout = dateTimeJSON + } + return t.Parse(layout, string(data)) +} + +func (t *dateTimeRFC3339) UnmarshalText(data []byte) error { + layout := utcDateTime + if tzOffsetRegex.Match(data) { + layout = time.RFC3339Nano + } + return t.Parse(layout, string(data)) +} + +func (t *dateTimeRFC3339) Parse(layout, value string) error { + p, err := time.Parse(layout, strings.ToUpper(value)) + *t = dateTimeRFC3339(p) + return err +} + +func populateDateTimeRFC3339(m map[string]any, k string, t *time.Time) { + if t == nil { + return + } else if azcore.IsNullValue(t) { + m[k] = nil + return + } else if reflect.ValueOf(t).IsNil() { + return + } + m[k] = (*dateTimeRFC3339)(t) +} + +func unpopulateDateTimeRFC3339(data json.RawMessage, fn string, t **time.Time) error { + if data == nil || strings.EqualFold(string(data), "null") { + return nil + } + var aux dateTimeRFC3339 + if err := json.Unmarshal(data, &aux); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + *t = (*time.Time)(&aux) + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/usages_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/usages_server.go new file mode 100644 index 00000000000..e69a23a1355 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/usages_server.go @@ -0,0 +1,108 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// UsagesServer is a fake server for instances of the armnetwork.UsagesClient type. +type UsagesServer struct { + // NewListPager is the fake for method UsagesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(location string, options *armnetwork.UsagesClientListOptions) (resp azfake.PagerResponder[armnetwork.UsagesClientListResponse]) +} + +// NewUsagesServerTransport creates a new instance of UsagesServerTransport with the provided implementation. +// The returned UsagesServerTransport instance is connected to an instance of armnetwork.UsagesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
+func NewUsagesServerTransport(srv *UsagesServer) *UsagesServerTransport { + return &UsagesServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.UsagesClientListResponse]](), + } +} + +// UsagesServerTransport connects instances of armnetwork.UsagesClient to instances of UsagesServer. +// Don't use this type directly, use NewUsagesServerTransport instead. +type UsagesServerTransport struct { + srv *UsagesServer + newListPager *tracker[azfake.PagerResponder[armnetwork.UsagesClientListResponse]] +} + +// Do implements the policy.Transporter interface for UsagesServerTransport. +func (u *UsagesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "UsagesClient.NewListPager": + resp, err = u.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (u *UsagesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if u.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := u.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/locations/(?P<location>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/usages` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + locationParam, err := url.PathUnescape(matches[regex.SubexpIndex("location")]) + if err != nil { + return nil, err + } + resp := u.srv.NewListPager(locationParam, nil) + newListPager = &resp + u.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.UsagesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + u.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + u.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vipswap_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vipswap_server.go new file mode 100644 index 00000000000..bab23f3a6db --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vipswap_server.go @@ -0,0 +1,197 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// VipSwapServer is a fake server for instances of the armnetwork.VipSwapClient type. +type VipSwapServer struct { + // BeginCreate is the fake for method VipSwapClient.BeginCreate + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginCreate func(ctx context.Context, groupName string, resourceName string, parameters armnetwork.SwapResource, options *armnetwork.VipSwapClientBeginCreateOptions) (resp azfake.PollerResponder[armnetwork.VipSwapClientCreateResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VipSwapClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, groupName string, resourceName string, options *armnetwork.VipSwapClientGetOptions) (resp azfake.Responder[armnetwork.VipSwapClientGetResponse], errResp azfake.ErrorResponder) + + // List is the fake for method VipSwapClient.List + // HTTP status codes to indicate success: http.StatusOK + List func(ctx context.Context, groupName string, resourceName string, options *armnetwork.VipSwapClientListOptions) (resp azfake.Responder[armnetwork.VipSwapClientListResponse], errResp azfake.ErrorResponder) +} + +// NewVipSwapServerTransport creates a new instance of VipSwapServerTransport with the provided implementation. +// The returned VipSwapServerTransport instance is connected to an instance of armnetwork.VipSwapClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVipSwapServerTransport(srv *VipSwapServer) *VipSwapServerTransport { + return &VipSwapServerTransport{ + srv: srv, + beginCreate: newTracker[azfake.PollerResponder[armnetwork.VipSwapClientCreateResponse]](), + } +} + +// VipSwapServerTransport connects instances of armnetwork.VipSwapClient to instances of VipSwapServer. +// Don't use this type directly, use NewVipSwapServerTransport instead. +type VipSwapServerTransport struct { + srv *VipSwapServer + beginCreate *tracker[azfake.PollerResponder[armnetwork.VipSwapClientCreateResponse]] +} + +// Do implements the policy.Transporter interface for VipSwapServerTransport. 
+func (v *VipSwapServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VipSwapClient.BeginCreate": + resp, err = v.dispatchBeginCreate(req) + case "VipSwapClient.Get": + resp, err = v.dispatchGet(req) + case "VipSwapClient.List": + resp, err = v.dispatchList(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VipSwapServerTransport) dispatchBeginCreate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreate not implemented")} + } + beginCreate := v.beginCreate.get(req) + if beginCreate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/cloudServiceSlots/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.SwapResource](req) + if err != nil { + return nil, err + } + groupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("groupName")]) + if err != nil { + return nil, err + } + resourceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCreate(req.Context(), groupNameParam, resourceNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreate = &respr + v.beginCreate.add(req, beginCreate) + } + + resp, err := server.PollerResponderNext(beginCreate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginCreate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreate) { + v.beginCreate.remove(req) + } + + return resp, nil +} + +func (v *VipSwapServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/cloudServiceSlots/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + groupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("groupName")]) + if err != nil { + return nil, err + } + resourceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.Get(req.Context(), groupNameParam, resourceNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).SwapResource, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VipSwapServerTransport) dispatchList(req *http.Request) (*http.Response, error) { + if v.srv.List == nil { + return nil, &nonRetriableError{errors.New("fake for method List not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Compute/cloudServices/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/cloudServiceSlots` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + groupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("groupName")]) + if err != nil { + return nil, err + } + resourceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.List(req.Context(), groupNameParam, resourceNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).SwapResourceListResult, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualapplianceconnections_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualapplianceconnections_server.go new file mode 100644 index 00000000000..55ad4e5b442 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualapplianceconnections_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// VirtualApplianceConnectionsServer is a fake server for instances of the armnetwork.VirtualApplianceConnectionsClient type. +type VirtualApplianceConnectionsServer struct { + // BeginCreateOrUpdate is the fake for method VirtualApplianceConnectionsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, connectionName string, networkVirtualApplianceConnectionParameters armnetwork.VirtualApplianceConnection, options *armnetwork.VirtualApplianceConnectionsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.VirtualApplianceConnectionsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VirtualApplianceConnectionsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, connectionName string, options *armnetwork.VirtualApplianceConnectionsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.VirtualApplianceConnectionsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VirtualApplianceConnectionsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, connectionName string, options *armnetwork.VirtualApplianceConnectionsClientGetOptions) (resp azfake.Responder[armnetwork.VirtualApplianceConnectionsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method VirtualApplianceConnectionsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, networkVirtualApplianceName string, options *armnetwork.VirtualApplianceConnectionsClientListOptions) (resp 
azfake.PagerResponder[armnetwork.VirtualApplianceConnectionsClientListResponse]) +} + +// NewVirtualApplianceConnectionsServerTransport creates a new instance of VirtualApplianceConnectionsServerTransport with the provided implementation. +// The returned VirtualApplianceConnectionsServerTransport instance is connected to an instance of armnetwork.VirtualApplianceConnectionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVirtualApplianceConnectionsServerTransport(srv *VirtualApplianceConnectionsServer) *VirtualApplianceConnectionsServerTransport { + return &VirtualApplianceConnectionsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.VirtualApplianceConnectionsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.VirtualApplianceConnectionsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.VirtualApplianceConnectionsClientListResponse]](), + } +} + +// VirtualApplianceConnectionsServerTransport connects instances of armnetwork.VirtualApplianceConnectionsClient to instances of VirtualApplianceConnectionsServer. +// Don't use this type directly, use NewVirtualApplianceConnectionsServerTransport instead. +type VirtualApplianceConnectionsServerTransport struct { + srv *VirtualApplianceConnectionsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.VirtualApplianceConnectionsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.VirtualApplianceConnectionsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.VirtualApplianceConnectionsClientListResponse]] +} + +// Do implements the policy.Transporter interface for VirtualApplianceConnectionsServerTransport. 
+func (v *VirtualApplianceConnectionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualApplianceConnectionsClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VirtualApplianceConnectionsClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VirtualApplianceConnectionsClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualApplianceConnectionsClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualApplianceConnectionsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkVirtualAppliances/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkVirtualApplianceConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VirtualApplianceConnection](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkVirtualApplianceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkVirtualApplianceName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, networkVirtualApplianceNameParam, connectionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VirtualApplianceConnectionsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkVirtualAppliances/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkVirtualApplianceConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkVirtualApplianceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkVirtualApplianceName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, networkVirtualApplianceNameParam, connectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VirtualApplianceConnectionsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkVirtualAppliances/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkVirtualApplianceConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkVirtualApplianceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkVirtualApplianceName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, networkVirtualApplianceNameParam, connectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualApplianceConnection, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualApplianceConnectionsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := v.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkVirtualAppliances/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkVirtualApplianceConnections` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkVirtualApplianceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkVirtualApplianceName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListPager(resourceGroupNameParam, networkVirtualApplianceNameParam, nil) + newListPager = &resp + v.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.VirtualApplianceConnectionsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if 
!contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + v.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualappliances_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualappliances_server.go new file mode 100644 index 00000000000..3a345e4d675 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualappliances_server.go @@ -0,0 +1,352 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// VirtualAppliancesServer is a fake server for instances of the armnetwork.VirtualAppliancesClient type. +type VirtualAppliancesServer struct { + // BeginCreateOrUpdate is the fake for method VirtualAppliancesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, parameters armnetwork.VirtualAppliance, options *armnetwork.VirtualAppliancesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.VirtualAppliancesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VirtualAppliancesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, options *armnetwork.VirtualAppliancesClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.VirtualAppliancesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VirtualAppliancesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, options *armnetwork.VirtualAppliancesClientGetOptions) (resp azfake.Responder[armnetwork.VirtualAppliancesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method VirtualAppliancesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armnetwork.VirtualAppliancesClientListOptions) (resp azfake.PagerResponder[armnetwork.VirtualAppliancesClientListResponse]) + + // NewListByResourceGroupPager is the fake for method VirtualAppliancesClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options 
*armnetwork.VirtualAppliancesClientListByResourceGroupOptions) (resp azfake.PagerResponder[armnetwork.VirtualAppliancesClientListByResourceGroupResponse]) + + // UpdateTags is the fake for method VirtualAppliancesClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, parameters armnetwork.TagsObject, options *armnetwork.VirtualAppliancesClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.VirtualAppliancesClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewVirtualAppliancesServerTransport creates a new instance of VirtualAppliancesServerTransport with the provided implementation. +// The returned VirtualAppliancesServerTransport instance is connected to an instance of armnetwork.VirtualAppliancesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVirtualAppliancesServerTransport(srv *VirtualAppliancesServer) *VirtualAppliancesServerTransport { + return &VirtualAppliancesServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.VirtualAppliancesClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.VirtualAppliancesClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.VirtualAppliancesClientListResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armnetwork.VirtualAppliancesClientListByResourceGroupResponse]](), + } +} + +// VirtualAppliancesServerTransport connects instances of armnetwork.VirtualAppliancesClient to instances of VirtualAppliancesServer. +// Don't use this type directly, use NewVirtualAppliancesServerTransport instead. +type VirtualAppliancesServerTransport struct { + srv *VirtualAppliancesServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.VirtualAppliancesClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.VirtualAppliancesClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.VirtualAppliancesClientListResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armnetwork.VirtualAppliancesClientListByResourceGroupResponse]] +} + +// Do implements the policy.Transporter interface for VirtualAppliancesServerTransport. 
+func (v *VirtualAppliancesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualAppliancesClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VirtualAppliancesClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VirtualAppliancesClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualAppliancesClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + case "VirtualAppliancesClient.NewListByResourceGroupPager": + resp, err = v.dispatchNewListByResourceGroupPager(req) + case "VirtualAppliancesClient.UpdateTags": + resp, err = v.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualAppliancesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkVirtualAppliances/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VirtualAppliance](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkVirtualApplianceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkVirtualApplianceName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, networkVirtualApplianceNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VirtualAppliancesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkVirtualAppliances/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkVirtualApplianceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkVirtualApplianceName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, networkVirtualApplianceNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VirtualAppliancesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkVirtualAppliances/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkVirtualApplianceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkVirtualApplianceName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.VirtualAppliancesClientGetOptions + if expandParam != nil { + options = &armnetwork.VirtualAppliancesClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, networkVirtualApplianceNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualAppliance, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualAppliancesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := v.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkVirtualAppliances` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := v.srv.NewListPager(nil) + newListPager = &resp + v.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.VirtualAppliancesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + v.newListPager.remove(req) + } + return resp, nil +} + +func (v *VirtualAppliancesServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := v.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkVirtualAppliances` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + v.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armnetwork.VirtualAppliancesClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + v.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (v *VirtualAppliancesServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if v.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkVirtualAppliances/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkVirtualApplianceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkVirtualApplianceName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.UpdateTags(req.Context(), resourceGroupNameParam, networkVirtualApplianceNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualAppliance, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualappliancesites_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualappliancesites_server.go new file mode 100644 index 00000000000..d1dc39079a8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualappliancesites_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// VirtualApplianceSitesServer is a fake server for instances of the armnetwork.VirtualApplianceSitesClient type. +type VirtualApplianceSitesServer struct { + // BeginCreateOrUpdate is the fake for method VirtualApplianceSitesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, siteName string, parameters armnetwork.VirtualApplianceSite, options *armnetwork.VirtualApplianceSitesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.VirtualApplianceSitesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VirtualApplianceSitesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, siteName string, options *armnetwork.VirtualApplianceSitesClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.VirtualApplianceSitesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VirtualApplianceSitesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, siteName string, options *armnetwork.VirtualApplianceSitesClientGetOptions) (resp azfake.Responder[armnetwork.VirtualApplianceSitesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method VirtualApplianceSitesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, networkVirtualApplianceName string, options *armnetwork.VirtualApplianceSitesClientListOptions) (resp azfake.PagerResponder[armnetwork.VirtualApplianceSitesClientListResponse]) +} + +// NewVirtualApplianceSitesServerTransport creates a new instance of VirtualApplianceSitesServerTransport with the provided 
implementation. +// The returned VirtualApplianceSitesServerTransport instance is connected to an instance of armnetwork.VirtualApplianceSitesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVirtualApplianceSitesServerTransport(srv *VirtualApplianceSitesServer) *VirtualApplianceSitesServerTransport { + return &VirtualApplianceSitesServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.VirtualApplianceSitesClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.VirtualApplianceSitesClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.VirtualApplianceSitesClientListResponse]](), + } +} + +// VirtualApplianceSitesServerTransport connects instances of armnetwork.VirtualApplianceSitesClient to instances of VirtualApplianceSitesServer. +// Don't use this type directly, use NewVirtualApplianceSitesServerTransport instead. +type VirtualApplianceSitesServerTransport struct { + srv *VirtualApplianceSitesServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.VirtualApplianceSitesClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.VirtualApplianceSitesClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.VirtualApplianceSitesClientListResponse]] +} + +// Do implements the policy.Transporter interface for VirtualApplianceSitesServerTransport. +func (v *VirtualApplianceSitesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualApplianceSitesClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VirtualApplianceSitesClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VirtualApplianceSitesClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualApplianceSitesClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualApplianceSitesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkVirtualAppliances/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualApplianceSites/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VirtualApplianceSite](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkVirtualApplianceNameParam, err := 
url.PathUnescape(matches[regex.SubexpIndex("networkVirtualApplianceName")]) + if err != nil { + return nil, err + } + siteNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("siteName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, networkVirtualApplianceNameParam, siteNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VirtualApplianceSitesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkVirtualAppliances/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualApplianceSites/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkVirtualApplianceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkVirtualApplianceName")]) + if err != nil { + return nil, err + } + siteNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("siteName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, networkVirtualApplianceNameParam, siteNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VirtualApplianceSitesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkVirtualAppliances/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualApplianceSites/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkVirtualApplianceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkVirtualApplianceName")]) + if err != nil { + return nil, err + } + siteNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("siteName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, networkVirtualApplianceNameParam, siteNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualApplianceSite, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualApplianceSitesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := v.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkVirtualAppliances/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualApplianceSites` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkVirtualApplianceNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkVirtualApplianceName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListPager(resourceGroupNameParam, networkVirtualApplianceNameParam, nil) + newListPager = &resp + v.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.VirtualApplianceSitesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListPager.remove(req) + 
return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + v.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualapplianceskus_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualapplianceskus_server.go new file mode 100644 index 00000000000..5a8ed21cce2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualapplianceskus_server.go @@ -0,0 +1,140 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// VirtualApplianceSKUsServer is a fake server for instances of the armnetwork.VirtualApplianceSKUsClient type. +type VirtualApplianceSKUsServer struct { + // Get is the fake for method VirtualApplianceSKUsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, skuName string, options *armnetwork.VirtualApplianceSKUsClientGetOptions) (resp azfake.Responder[armnetwork.VirtualApplianceSKUsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method VirtualApplianceSKUsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armnetwork.VirtualApplianceSKUsClientListOptions) (resp azfake.PagerResponder[armnetwork.VirtualApplianceSKUsClientListResponse]) +} + +// NewVirtualApplianceSKUsServerTransport creates a new instance of VirtualApplianceSKUsServerTransport with the provided implementation. +// The returned VirtualApplianceSKUsServerTransport instance is connected to an instance of armnetwork.VirtualApplianceSKUsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVirtualApplianceSKUsServerTransport(srv *VirtualApplianceSKUsServer) *VirtualApplianceSKUsServerTransport { + return &VirtualApplianceSKUsServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.VirtualApplianceSKUsClientListResponse]](), + } +} + +// VirtualApplianceSKUsServerTransport connects instances of armnetwork.VirtualApplianceSKUsClient to instances of VirtualApplianceSKUsServer. +// Don't use this type directly, use NewVirtualApplianceSKUsServerTransport instead. +type VirtualApplianceSKUsServerTransport struct { + srv *VirtualApplianceSKUsServer + newListPager *tracker[azfake.PagerResponder[armnetwork.VirtualApplianceSKUsClientListResponse]] +} + +// Do implements the policy.Transporter interface for VirtualApplianceSKUsServerTransport. 
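// Illustrative sketch (not generated code): a VirtualApplianceSKUsServer only needs the
// handlers a test actually exercises, and it is wired to armnetwork.VirtualApplianceSKUsClient
// through NewVirtualApplianceSKUsServerTransport and azcore.ClientOptions.Transporter, exactly
// as in the VirtualAppliancesServer sketch earlier in this diff (same imports assumed). The
// helper name below is hypothetical.
func newFakeVirtualApplianceSKUsTransport() *fake.VirtualApplianceSKUsServerTransport {
	srv := fake.VirtualApplianceSKUsServer{
		Get: func(ctx context.Context, skuName string, options *armnetwork.VirtualApplianceSKUsClientGetOptions) (resp azfake.Responder[armnetwork.VirtualApplianceSKUsClientGetResponse], errResp azfake.ErrorResponder) {
			// Echo the requested SKU name back in the response payload.
			resp.SetResponse(http.StatusOK, armnetwork.VirtualApplianceSKUsClientGetResponse{
				VirtualApplianceSKU: armnetwork.VirtualApplianceSKU{Name: to.Ptr(skuName)},
			}, nil)
			return
		},
	}
	return fake.NewVirtualApplianceSKUsServerTransport(&srv)
}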
+func (v *VirtualApplianceSKUsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualApplianceSKUsClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualApplianceSKUsClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualApplianceSKUsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkVirtualApplianceSkus/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + skuNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("skuName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.Get(req.Context(), skuNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualApplianceSKU, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualApplianceSKUsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := v.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkVirtualApplianceSkus` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := v.srv.NewListPager(nil) + newListPager = &resp + v.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.VirtualApplianceSKUsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + v.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualhubbgpconnection_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualhubbgpconnection_server.go new file mode 100644 index 00000000000..252ef27a48c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualhubbgpconnection_server.go @@ -0,0 +1,222 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// VirtualHubBgpConnectionServer is a fake server for instances of the armnetwork.VirtualHubBgpConnectionClient type. +type VirtualHubBgpConnectionServer struct { + // BeginCreateOrUpdate is the fake for method VirtualHubBgpConnectionClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, virtualHubName string, connectionName string, parameters armnetwork.BgpConnection, options *armnetwork.VirtualHubBgpConnectionClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.VirtualHubBgpConnectionClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VirtualHubBgpConnectionClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, virtualHubName string, connectionName string, options *armnetwork.VirtualHubBgpConnectionClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.VirtualHubBgpConnectionClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VirtualHubBgpConnectionClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, virtualHubName string, connectionName string, options *armnetwork.VirtualHubBgpConnectionClientGetOptions) (resp azfake.Responder[armnetwork.VirtualHubBgpConnectionClientGetResponse], errResp azfake.ErrorResponder) +} + +// NewVirtualHubBgpConnectionServerTransport creates a new instance of VirtualHubBgpConnectionServerTransport with the provided implementation. +// The returned VirtualHubBgpConnectionServerTransport instance is connected to an instance of armnetwork.VirtualHubBgpConnectionClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
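// Illustrative sketch (not generated code): long-running operations such as BeginDelete are
// faked with azfake.PollerResponder, whose queued responses are drained one per poll by
// server.PollerResponderNext in the dispatchers below; status codes must come from the sets
// the dispatcher accepts (200/202/204 for delete). The helper name is hypothetical and the
// imports match the VirtualAppliancesServer sketch earlier in this diff.
func newFakeVirtualHubBgpConnectionServer() *fake.VirtualHubBgpConnectionServer {
	return &fake.VirtualHubBgpConnectionServer{
		BeginDelete: func(ctx context.Context, resourceGroupName, virtualHubName, connectionName string, options *armnetwork.VirtualHubBgpConnectionClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.VirtualHubBgpConnectionClientDeleteResponse], errResp azfake.ErrorResponder) {
			resp.AddNonTerminalResponse(http.StatusAccepted, nil) // one in-flight poll before completion
			resp.SetTerminalResponse(http.StatusOK, armnetwork.VirtualHubBgpConnectionClientDeleteResponse{}, nil)
			return
		},
	}
}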
+func NewVirtualHubBgpConnectionServerTransport(srv *VirtualHubBgpConnectionServer) *VirtualHubBgpConnectionServerTransport { + return &VirtualHubBgpConnectionServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.VirtualHubBgpConnectionClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.VirtualHubBgpConnectionClientDeleteResponse]](), + } +} + +// VirtualHubBgpConnectionServerTransport connects instances of armnetwork.VirtualHubBgpConnectionClient to instances of VirtualHubBgpConnectionServer. +// Don't use this type directly, use NewVirtualHubBgpConnectionServerTransport instead. +type VirtualHubBgpConnectionServerTransport struct { + srv *VirtualHubBgpConnectionServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.VirtualHubBgpConnectionClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.VirtualHubBgpConnectionClientDeleteResponse]] +} + +// Do implements the policy.Transporter interface for VirtualHubBgpConnectionServerTransport. +func (v *VirtualHubBgpConnectionServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualHubBgpConnectionClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VirtualHubBgpConnectionClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VirtualHubBgpConnectionClient.Get": + resp, err = v.dispatchGet(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualHubBgpConnectionServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/bgpConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.BgpConnection](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, virtualHubNameParam, connectionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := 
server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VirtualHubBgpConnectionServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/bgpConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, virtualHubNameParam, connectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VirtualHubBgpConnectionServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/bgpConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, virtualHubNameParam, connectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).BgpConnection, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualhubbgpconnections_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualhubbgpconnections_server.go new file mode 100644 index 00000000000..9cf1e887595 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualhubbgpconnections_server.go @@ -0,0 +1,225 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// VirtualHubBgpConnectionsServer is a fake server for instances of the armnetwork.VirtualHubBgpConnectionsClient type. 
+type VirtualHubBgpConnectionsServer struct { + // NewListPager is the fake for method VirtualHubBgpConnectionsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, virtualHubName string, options *armnetwork.VirtualHubBgpConnectionsClientListOptions) (resp azfake.PagerResponder[armnetwork.VirtualHubBgpConnectionsClientListResponse]) + + // BeginListAdvertisedRoutes is the fake for method VirtualHubBgpConnectionsClient.BeginListAdvertisedRoutes + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginListAdvertisedRoutes func(ctx context.Context, resourceGroupName string, hubName string, connectionName string, options *armnetwork.VirtualHubBgpConnectionsClientBeginListAdvertisedRoutesOptions) (resp azfake.PollerResponder[armnetwork.VirtualHubBgpConnectionsClientListAdvertisedRoutesResponse], errResp azfake.ErrorResponder) + + // BeginListLearnedRoutes is the fake for method VirtualHubBgpConnectionsClient.BeginListLearnedRoutes + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginListLearnedRoutes func(ctx context.Context, resourceGroupName string, hubName string, connectionName string, options *armnetwork.VirtualHubBgpConnectionsClientBeginListLearnedRoutesOptions) (resp azfake.PollerResponder[armnetwork.VirtualHubBgpConnectionsClientListLearnedRoutesResponse], errResp azfake.ErrorResponder) +} + +// NewVirtualHubBgpConnectionsServerTransport creates a new instance of VirtualHubBgpConnectionsServerTransport with the provided implementation. +// The returned VirtualHubBgpConnectionsServerTransport instance is connected to an instance of armnetwork.VirtualHubBgpConnectionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVirtualHubBgpConnectionsServerTransport(srv *VirtualHubBgpConnectionsServer) *VirtualHubBgpConnectionsServerTransport { + return &VirtualHubBgpConnectionsServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armnetwork.VirtualHubBgpConnectionsClientListResponse]](), + beginListAdvertisedRoutes: newTracker[azfake.PollerResponder[armnetwork.VirtualHubBgpConnectionsClientListAdvertisedRoutesResponse]](), + beginListLearnedRoutes: newTracker[azfake.PollerResponder[armnetwork.VirtualHubBgpConnectionsClientListLearnedRoutesResponse]](), + } +} + +// VirtualHubBgpConnectionsServerTransport connects instances of armnetwork.VirtualHubBgpConnectionsClient to instances of VirtualHubBgpConnectionsServer. +// Don't use this type directly, use NewVirtualHubBgpConnectionsServerTransport instead. +type VirtualHubBgpConnectionsServerTransport struct { + srv *VirtualHubBgpConnectionsServer + newListPager *tracker[azfake.PagerResponder[armnetwork.VirtualHubBgpConnectionsClientListResponse]] + beginListAdvertisedRoutes *tracker[azfake.PollerResponder[armnetwork.VirtualHubBgpConnectionsClientListAdvertisedRoutesResponse]] + beginListLearnedRoutes *tracker[azfake.PollerResponder[armnetwork.VirtualHubBgpConnectionsClientListLearnedRoutesResponse]] +} + +// Do implements the policy.Transporter interface for VirtualHubBgpConnectionsServerTransport. 
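// Illustrative sketch (not generated code): pageable operations are faked with
// azfake.PagerResponder; each AddPage call becomes one page served by
// server.PagerResponderNext in dispatchNewListPager below, which also injects nextLink
// values. The helper name and empty page payloads are placeholders; imports match the
// VirtualAppliancesServer sketch earlier in this diff.
func newFakeVirtualHubBgpConnectionsServer() *fake.VirtualHubBgpConnectionsServer {
	return &fake.VirtualHubBgpConnectionsServer{
		NewListPager: func(resourceGroupName, virtualHubName string, options *armnetwork.VirtualHubBgpConnectionsClientListOptions) (resp azfake.PagerResponder[armnetwork.VirtualHubBgpConnectionsClientListResponse]) {
			// Two empty pages; a real test would populate the list items here.
			resp.AddPage(http.StatusOK, armnetwork.VirtualHubBgpConnectionsClientListResponse{}, nil)
			resp.AddPage(http.StatusOK, armnetwork.VirtualHubBgpConnectionsClientListResponse{}, nil)
			return
		},
	}
}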
+func (v *VirtualHubBgpConnectionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualHubBgpConnectionsClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + case "VirtualHubBgpConnectionsClient.BeginListAdvertisedRoutes": + resp, err = v.dispatchBeginListAdvertisedRoutes(req) + case "VirtualHubBgpConnectionsClient.BeginListLearnedRoutes": + resp, err = v.dispatchBeginListLearnedRoutes(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualHubBgpConnectionsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := v.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/bgpConnections` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListPager(resourceGroupNameParam, virtualHubNameParam, nil) + newListPager = &resp + v.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.VirtualHubBgpConnectionsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + v.newListPager.remove(req) + } + return resp, nil +} + +func (v *VirtualHubBgpConnectionsServerTransport) dispatchBeginListAdvertisedRoutes(req *http.Request) (*http.Response, error) { + if v.srv.BeginListAdvertisedRoutes == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginListAdvertisedRoutes not implemented")} + } + beginListAdvertisedRoutes := v.beginListAdvertisedRoutes.get(req) + if beginListAdvertisedRoutes == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/bgpConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/advertisedRoutes` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + hubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("hubName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginListAdvertisedRoutes(req.Context(), resourceGroupNameParam, hubNameParam, connectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginListAdvertisedRoutes = &respr + v.beginListAdvertisedRoutes.add(req, beginListAdvertisedRoutes) + } + + resp, err := server.PollerResponderNext(beginListAdvertisedRoutes, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginListAdvertisedRoutes.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginListAdvertisedRoutes) { + v.beginListAdvertisedRoutes.remove(req) + } + + return resp, nil +} + +func (v *VirtualHubBgpConnectionsServerTransport) dispatchBeginListLearnedRoutes(req *http.Request) (*http.Response, error) { + if v.srv.BeginListLearnedRoutes == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginListLearnedRoutes not implemented")} + } + beginListLearnedRoutes := v.beginListLearnedRoutes.get(req) + if beginListLearnedRoutes == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/bgpConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/learnedRoutes` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + hubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("hubName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginListLearnedRoutes(req.Context(), resourceGroupNameParam, hubNameParam, connectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginListLearnedRoutes = &respr + v.beginListLearnedRoutes.add(req, beginListLearnedRoutes) + } + + resp, err := server.PollerResponderNext(beginListLearnedRoutes, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginListLearnedRoutes.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginListLearnedRoutes) { + v.beginListLearnedRoutes.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualhubipconfiguration_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualhubipconfiguration_server.go new file mode 100644 index 00000000000..28b34bb92b1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualhubipconfiguration_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
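// Illustrative sketch (not generated code): error paths are faked through the
// azfake.ErrorResponder returned alongside each responder; server.GetError in the
// dispatchers below converts it into an error response for the client. The helper name
// and error code are placeholders, the SetResponseError signature is taken from the
// azcore fake package as assumed here, and imports match the VirtualAppliancesServer
// sketch earlier in this diff.
func newFakeVirtualHubIPConfigurationServer() *fake.VirtualHubIPConfigurationServer {
	return &fake.VirtualHubIPConfigurationServer{
		Get: func(ctx context.Context, resourceGroupName, virtualHubName, ipConfigName string, options *armnetwork.VirtualHubIPConfigurationClientGetOptions) (resp azfake.Responder[armnetwork.VirtualHubIPConfigurationClientGetResponse], errResp azfake.ErrorResponder) {
			// Simulate a 404 for every IP configuration lookup.
			errResp.SetResponseError(http.StatusNotFound, "NotFound")
			return
		},
	}
}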
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// VirtualHubIPConfigurationServer is a fake server for instances of the armnetwork.VirtualHubIPConfigurationClient type. +type VirtualHubIPConfigurationServer struct { + // BeginCreateOrUpdate is the fake for method VirtualHubIPConfigurationClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, virtualHubName string, ipConfigName string, parameters armnetwork.HubIPConfiguration, options *armnetwork.VirtualHubIPConfigurationClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.VirtualHubIPConfigurationClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VirtualHubIPConfigurationClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, virtualHubName string, ipConfigName string, options *armnetwork.VirtualHubIPConfigurationClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.VirtualHubIPConfigurationClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VirtualHubIPConfigurationClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, virtualHubName string, ipConfigName string, options *armnetwork.VirtualHubIPConfigurationClientGetOptions) (resp azfake.Responder[armnetwork.VirtualHubIPConfigurationClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method VirtualHubIPConfigurationClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, virtualHubName string, options *armnetwork.VirtualHubIPConfigurationClientListOptions) (resp azfake.PagerResponder[armnetwork.VirtualHubIPConfigurationClientListResponse]) +} + +// NewVirtualHubIPConfigurationServerTransport creates a new instance of VirtualHubIPConfigurationServerTransport with the provided implementation. +// The returned VirtualHubIPConfigurationServerTransport instance is connected to an instance of armnetwork.VirtualHubIPConfigurationClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVirtualHubIPConfigurationServerTransport(srv *VirtualHubIPConfigurationServer) *VirtualHubIPConfigurationServerTransport { + return &VirtualHubIPConfigurationServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.VirtualHubIPConfigurationClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.VirtualHubIPConfigurationClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.VirtualHubIPConfigurationClientListResponse]](), + } +} + +// VirtualHubIPConfigurationServerTransport connects instances of armnetwork.VirtualHubIPConfigurationClient to instances of VirtualHubIPConfigurationServer. 
+// Don't use this type directly, use NewVirtualHubIPConfigurationServerTransport instead. +type VirtualHubIPConfigurationServerTransport struct { + srv *VirtualHubIPConfigurationServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.VirtualHubIPConfigurationClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.VirtualHubIPConfigurationClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.VirtualHubIPConfigurationClientListResponse]] +} + +// Do implements the policy.Transporter interface for VirtualHubIPConfigurationServerTransport. +func (v *VirtualHubIPConfigurationServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualHubIPConfigurationClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VirtualHubIPConfigurationClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VirtualHubIPConfigurationClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualHubIPConfigurationClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualHubIPConfigurationServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ipConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.HubIPConfiguration](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + ipConfigNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ipConfigName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, virtualHubNameParam, ipConfigNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VirtualHubIPConfigurationServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ipConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + ipConfigNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ipConfigName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, virtualHubNameParam, ipConfigNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)}
+	}
+	if !server.PollerResponderMore(beginDelete) {
+		v.beginDelete.remove(req)
+	}
+
+	return resp, nil
+}
+
+func (v *VirtualHubIPConfigurationServerTransport) dispatchGet(req *http.Request) (*http.Response, error) {
+	if v.srv.Get == nil {
+		return nil, &nonRetriableError{errors.New("fake for method Get not implemented")}
+	}
+	const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P<virtualHubName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ipConfigurations/(?P<ipConfigName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)`
+	regex := regexp.MustCompile(regexStr)
+	matches := regex.FindStringSubmatch(req.URL.EscapedPath())
+	if matches == nil || len(matches) < 4 {
+		return nil, fmt.Errorf("failed to parse path %s", req.URL.Path)
+	}
+	resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")])
+	if err != nil {
+		return nil, err
+	}
+	virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")])
+	if err != nil {
+		return nil, err
+	}
+	ipConfigNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("ipConfigName")])
+	if err != nil {
+		return nil, err
+	}
+	respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, virtualHubNameParam, ipConfigNameParam, nil)
+	if respErr := server.GetError(errRespr, req); respErr != nil {
+		return nil, respErr
+	}
+	respContent := server.GetResponseContent(respr)
+	if !contains([]int{http.StatusOK}, respContent.HTTPStatus) {
+		return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)}
+	}
+	resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).HubIPConfiguration, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+func (v *VirtualHubIPConfigurationServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) {
+	if v.srv.NewListPager == nil {
+		return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")}
+	}
+	newListPager := v.newListPager.get(req)
+	if newListPager == nil {
+		const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P<virtualHubName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ipConfigurations`
+		regex := regexp.MustCompile(regexStr)
+		matches := regex.FindStringSubmatch(req.URL.EscapedPath())
+		if matches == nil || len(matches) < 3 {
+			return nil, fmt.Errorf("failed to parse path %s", req.URL.Path)
+		}
+		resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")])
+		if err != nil {
+			return nil, err
+		}
+		virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")])
+		if err != nil {
+			return nil, err
+		}
+		resp := v.srv.NewListPager(resourceGroupNameParam, virtualHubNameParam, nil)
+		newListPager = &resp
+		v.newListPager.add(req, newListPager)
+		server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.VirtualHubIPConfigurationClientListResponse, createLink func() string) {
+			page.NextLink = to.Ptr(createLink())
+		})
+	}
+	resp, err := server.PagerResponderNext(newListPager, req)
+	if err != nil {
+		return nil, err
+	}
+	if !contains([]int{http.StatusOK}, resp.StatusCode) {
+		v.newListPager.remove(req)
+		return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + v.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualhubroutetablev2s_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualhubroutetablev2s_server.go new file mode 100644 index 00000000000..9bf3f07c43b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualhubroutetablev2s_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// VirtualHubRouteTableV2SServer is a fake server for instances of the armnetwork.VirtualHubRouteTableV2SClient type. +type VirtualHubRouteTableV2SServer struct { + // BeginCreateOrUpdate is the fake for method VirtualHubRouteTableV2SClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, virtualHubName string, routeTableName string, virtualHubRouteTableV2Parameters armnetwork.VirtualHubRouteTableV2, options *armnetwork.VirtualHubRouteTableV2SClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.VirtualHubRouteTableV2SClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VirtualHubRouteTableV2SClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, virtualHubName string, routeTableName string, options *armnetwork.VirtualHubRouteTableV2SClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.VirtualHubRouteTableV2SClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VirtualHubRouteTableV2SClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, virtualHubName string, routeTableName string, options *armnetwork.VirtualHubRouteTableV2SClientGetOptions) (resp azfake.Responder[armnetwork.VirtualHubRouteTableV2SClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method VirtualHubRouteTableV2SClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, virtualHubName string, options *armnetwork.VirtualHubRouteTableV2SClientListOptions) (resp azfake.PagerResponder[armnetwork.VirtualHubRouteTableV2SClientListResponse]) +} + +// NewVirtualHubRouteTableV2SServerTransport creates a new instance of VirtualHubRouteTableV2SServerTransport with the provided implementation. 
+// The returned VirtualHubRouteTableV2SServerTransport instance is connected to an instance of armnetwork.VirtualHubRouteTableV2SClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVirtualHubRouteTableV2SServerTransport(srv *VirtualHubRouteTableV2SServer) *VirtualHubRouteTableV2SServerTransport { + return &VirtualHubRouteTableV2SServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.VirtualHubRouteTableV2SClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.VirtualHubRouteTableV2SClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.VirtualHubRouteTableV2SClientListResponse]](), + } +} + +// VirtualHubRouteTableV2SServerTransport connects instances of armnetwork.VirtualHubRouteTableV2SClient to instances of VirtualHubRouteTableV2SServer. +// Don't use this type directly, use NewVirtualHubRouteTableV2SServerTransport instead. +type VirtualHubRouteTableV2SServerTransport struct { + srv *VirtualHubRouteTableV2SServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.VirtualHubRouteTableV2SClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.VirtualHubRouteTableV2SClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.VirtualHubRouteTableV2SClientListResponse]] +} + +// Do implements the policy.Transporter interface for VirtualHubRouteTableV2SServerTransport. +func (v *VirtualHubRouteTableV2SServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualHubRouteTableV2SClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VirtualHubRouteTableV2SClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VirtualHubRouteTableV2SClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualHubRouteTableV2SClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualHubRouteTableV2SServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/routeTables/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VirtualHubRouteTableV2](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := 
url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + routeTableNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeTableName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, virtualHubNameParam, routeTableNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VirtualHubRouteTableV2SServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/routeTables/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + routeTableNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeTableName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, virtualHubNameParam, routeTableNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VirtualHubRouteTableV2SServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/routeTables/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + routeTableNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("routeTableName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, virtualHubNameParam, routeTableNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualHubRouteTableV2, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualHubRouteTableV2SServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := v.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/routeTables` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListPager(resourceGroupNameParam, virtualHubNameParam, nil) + newListPager = &resp + v.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.VirtualHubRouteTableV2SClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + v.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualhubs_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualhubs_server.go new file mode 100644 index 00000000000..bcd76a37ada --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualhubs_server.go @@ -0,0 +1,515 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "reflect" + "regexp" +) + +// VirtualHubsServer is a fake server for instances of the armnetwork.VirtualHubsClient type. +type VirtualHubsServer struct { + // BeginCreateOrUpdate is the fake for method VirtualHubsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, virtualHubName string, virtualHubParameters armnetwork.VirtualHub, options *armnetwork.VirtualHubsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.VirtualHubsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VirtualHubsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, virtualHubName string, options *armnetwork.VirtualHubsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.VirtualHubsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VirtualHubsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, virtualHubName string, options *armnetwork.VirtualHubsClientGetOptions) (resp azfake.Responder[armnetwork.VirtualHubsClientGetResponse], errResp azfake.ErrorResponder) + + // BeginGetEffectiveVirtualHubRoutes is the fake for method VirtualHubsClient.BeginGetEffectiveVirtualHubRoutes + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGetEffectiveVirtualHubRoutes func(ctx context.Context, resourceGroupName string, virtualHubName string, options *armnetwork.VirtualHubsClientBeginGetEffectiveVirtualHubRoutesOptions) (resp azfake.PollerResponder[armnetwork.VirtualHubsClientGetEffectiveVirtualHubRoutesResponse], errResp azfake.ErrorResponder) + + // BeginGetInboundRoutes is the fake for method VirtualHubsClient.BeginGetInboundRoutes + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGetInboundRoutes func(ctx context.Context, resourceGroupName string, virtualHubName string, 
getInboundRoutesParameters armnetwork.GetInboundRoutesParameters, options *armnetwork.VirtualHubsClientBeginGetInboundRoutesOptions) (resp azfake.PollerResponder[armnetwork.VirtualHubsClientGetInboundRoutesResponse], errResp azfake.ErrorResponder) + + // BeginGetOutboundRoutes is the fake for method VirtualHubsClient.BeginGetOutboundRoutes + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGetOutboundRoutes func(ctx context.Context, resourceGroupName string, virtualHubName string, getOutboundRoutesParameters armnetwork.GetOutboundRoutesParameters, options *armnetwork.VirtualHubsClientBeginGetOutboundRoutesOptions) (resp azfake.PollerResponder[armnetwork.VirtualHubsClientGetOutboundRoutesResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method VirtualHubsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armnetwork.VirtualHubsClientListOptions) (resp azfake.PagerResponder[armnetwork.VirtualHubsClientListResponse]) + + // NewListByResourceGroupPager is the fake for method VirtualHubsClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armnetwork.VirtualHubsClientListByResourceGroupOptions) (resp azfake.PagerResponder[armnetwork.VirtualHubsClientListByResourceGroupResponse]) + + // UpdateTags is the fake for method VirtualHubsClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, virtualHubName string, virtualHubParameters armnetwork.TagsObject, options *armnetwork.VirtualHubsClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.VirtualHubsClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewVirtualHubsServerTransport creates a new instance of VirtualHubsServerTransport with the provided implementation. +// The returned VirtualHubsServerTransport instance is connected to an instance of armnetwork.VirtualHubsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVirtualHubsServerTransport(srv *VirtualHubsServer) *VirtualHubsServerTransport { + return &VirtualHubsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.VirtualHubsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.VirtualHubsClientDeleteResponse]](), + beginGetEffectiveVirtualHubRoutes: newTracker[azfake.PollerResponder[armnetwork.VirtualHubsClientGetEffectiveVirtualHubRoutesResponse]](), + beginGetInboundRoutes: newTracker[azfake.PollerResponder[armnetwork.VirtualHubsClientGetInboundRoutesResponse]](), + beginGetOutboundRoutes: newTracker[azfake.PollerResponder[armnetwork.VirtualHubsClientGetOutboundRoutesResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.VirtualHubsClientListResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armnetwork.VirtualHubsClientListByResourceGroupResponse]](), + } +} + +// VirtualHubsServerTransport connects instances of armnetwork.VirtualHubsClient to instances of VirtualHubsServer. +// Don't use this type directly, use NewVirtualHubsServerTransport instead. 
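The doc comments above spell out the wiring: the transport returned by NewVirtualHubsServerTransport is handed to the real armnetwork.VirtualHubsClient through the azcore.ClientOptions.Transporter field. A minimal test-side sketch of that wiring follows, assuming the standard armnetwork.NewVirtualHubsClient constructor and the azfake.TokenCredential and Responder helpers from azcore/fake; the package name, subscription ID, and the Get behaviour are invented for illustration and are not part of the vendored file or of this patch.

package fakedemo // hypothetical test helper package, not part of the vendored SDK

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

// newFakeVirtualHubsClient returns a client whose requests are served by the
// in-memory fake instead of ARM. The Get callback simply echoes the requested
// hub name back with a 200; the subscription ID is a placeholder.
func newFakeVirtualHubsClient() (*armnetwork.VirtualHubsClient, error) {
	srv := fake.VirtualHubsServer{
		Get: func(ctx context.Context, resourceGroupName, virtualHubName string, options *armnetwork.VirtualHubsClientGetOptions) (resp azfake.Responder[armnetwork.VirtualHubsClientGetResponse], errResp azfake.ErrorResponder) {
			resp.SetResponse(http.StatusOK, armnetwork.VirtualHubsClientGetResponse{
				VirtualHub: armnetwork.VirtualHub{Name: to.Ptr(virtualHubName)},
			}, nil)
			return
		},
	}
	// The fake transport is plugged in via azcore.ClientOptions.Transporter,
	// as the doc comment on NewVirtualHubsServerTransport describes.
	return armnetwork.NewVirtualHubsClient("00000000-0000-0000-0000-000000000000", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{
			Transporter: fake.NewVirtualHubsServerTransport(&srv),
		},
	})
}

Because the transport implements policy.Transporter, no request leaves the process; everything the client sends is routed to the callbacks registered on the fake server.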
+type VirtualHubsServerTransport struct { + srv *VirtualHubsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.VirtualHubsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.VirtualHubsClientDeleteResponse]] + beginGetEffectiveVirtualHubRoutes *tracker[azfake.PollerResponder[armnetwork.VirtualHubsClientGetEffectiveVirtualHubRoutesResponse]] + beginGetInboundRoutes *tracker[azfake.PollerResponder[armnetwork.VirtualHubsClientGetInboundRoutesResponse]] + beginGetOutboundRoutes *tracker[azfake.PollerResponder[armnetwork.VirtualHubsClientGetOutboundRoutesResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.VirtualHubsClientListResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armnetwork.VirtualHubsClientListByResourceGroupResponse]] +} + +// Do implements the policy.Transporter interface for VirtualHubsServerTransport. +func (v *VirtualHubsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualHubsClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VirtualHubsClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VirtualHubsClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualHubsClient.BeginGetEffectiveVirtualHubRoutes": + resp, err = v.dispatchBeginGetEffectiveVirtualHubRoutes(req) + case "VirtualHubsClient.BeginGetInboundRoutes": + resp, err = v.dispatchBeginGetInboundRoutes(req) + case "VirtualHubsClient.BeginGetOutboundRoutes": + resp, err = v.dispatchBeginGetOutboundRoutes(req) + case "VirtualHubsClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + case "VirtualHubsClient.NewListByResourceGroupPager": + resp, err = v.dispatchNewListByResourceGroupPager(req) + case "VirtualHubsClient.UpdateTags": + resp, err = v.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualHubsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VirtualHub](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, virtualHubNameParam, body, nil) + if respErr := 
server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VirtualHubsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, virtualHubNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VirtualHubsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, virtualHubNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualHub, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualHubsServerTransport) dispatchBeginGetEffectiveVirtualHubRoutes(req *http.Request) (*http.Response, error) { + if v.srv.BeginGetEffectiveVirtualHubRoutes == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGetEffectiveVirtualHubRoutes not implemented")} + } + beginGetEffectiveVirtualHubRoutes := v.beginGetEffectiveVirtualHubRoutes.get(req) + if beginGetEffectiveVirtualHubRoutes == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/effectiveRoutes` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.EffectiveRoutesParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + var options *armnetwork.VirtualHubsClientBeginGetEffectiveVirtualHubRoutesOptions + if !reflect.ValueOf(body).IsZero() { + options = &armnetwork.VirtualHubsClientBeginGetEffectiveVirtualHubRoutesOptions{ + EffectiveRoutesParameters: &body, + } + } + respr, errRespr := v.srv.BeginGetEffectiveVirtualHubRoutes(req.Context(), resourceGroupNameParam, virtualHubNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGetEffectiveVirtualHubRoutes = &respr + v.beginGetEffectiveVirtualHubRoutes.add(req, beginGetEffectiveVirtualHubRoutes) + } + 
+ resp, err := server.PollerResponderNext(beginGetEffectiveVirtualHubRoutes, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginGetEffectiveVirtualHubRoutes.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGetEffectiveVirtualHubRoutes) { + v.beginGetEffectiveVirtualHubRoutes.remove(req) + } + + return resp, nil +} + +func (v *VirtualHubsServerTransport) dispatchBeginGetInboundRoutes(req *http.Request) (*http.Response, error) { + if v.srv.BeginGetInboundRoutes == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGetInboundRoutes not implemented")} + } + beginGetInboundRoutes := v.beginGetInboundRoutes.get(req) + if beginGetInboundRoutes == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/inboundRoutes` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.GetInboundRoutesParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginGetInboundRoutes(req.Context(), resourceGroupNameParam, virtualHubNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGetInboundRoutes = &respr + v.beginGetInboundRoutes.add(req, beginGetInboundRoutes) + } + + resp, err := server.PollerResponderNext(beginGetInboundRoutes, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginGetInboundRoutes.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGetInboundRoutes) { + v.beginGetInboundRoutes.remove(req) + } + + return resp, nil +} + +func (v *VirtualHubsServerTransport) dispatchBeginGetOutboundRoutes(req *http.Request) (*http.Response, error) { + if v.srv.BeginGetOutboundRoutes == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGetOutboundRoutes not implemented")} + } + beginGetOutboundRoutes := v.beginGetOutboundRoutes.get(req) + if beginGetOutboundRoutes == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/outboundRoutes` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.GetOutboundRoutesParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginGetOutboundRoutes(req.Context(), resourceGroupNameParam, virtualHubNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGetOutboundRoutes = &respr + v.beginGetOutboundRoutes.add(req, beginGetOutboundRoutes) + } + + resp, err := server.PollerResponderNext(beginGetOutboundRoutes, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginGetOutboundRoutes.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGetOutboundRoutes) { + v.beginGetOutboundRoutes.remove(req) + } + + return resp, nil +} + +func (v *VirtualHubsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := v.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := v.srv.NewListPager(nil) + newListPager = &resp + v.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.VirtualHubsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)}
+	}
+	if !server.PagerResponderMore(newListPager) {
+		v.newListPager.remove(req)
+	}
+	return resp, nil
+}
+
+func (v *VirtualHubsServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) {
+	if v.srv.NewListByResourceGroupPager == nil {
+		return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")}
+	}
+	newListByResourceGroupPager := v.newListByResourceGroupPager.get(req)
+	if newListByResourceGroupPager == nil {
+		const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs`
+		regex := regexp.MustCompile(regexStr)
+		matches := regex.FindStringSubmatch(req.URL.EscapedPath())
+		if matches == nil || len(matches) < 2 {
+			return nil, fmt.Errorf("failed to parse path %s", req.URL.Path)
+		}
+		resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")])
+		if err != nil {
+			return nil, err
+		}
+		resp := v.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil)
+		newListByResourceGroupPager = &resp
+		v.newListByResourceGroupPager.add(req, newListByResourceGroupPager)
+		server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armnetwork.VirtualHubsClientListByResourceGroupResponse, createLink func() string) {
+			page.NextLink = to.Ptr(createLink())
+		})
+	}
+	resp, err := server.PagerResponderNext(newListByResourceGroupPager, req)
+	if err != nil {
+		return nil, err
+	}
+	if !contains([]int{http.StatusOK}, resp.StatusCode) {
+		v.newListByResourceGroupPager.remove(req)
+		return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)}
+	}
+	if !server.PagerResponderMore(newListByResourceGroupPager) {
+		v.newListByResourceGroupPager.remove(req)
+	}
+	return resp, nil
+}
+
+func (v *VirtualHubsServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) {
+	if v.srv.UpdateTags == nil {
+		return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")}
+	}
+	const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualHubs/(?P<virtualHubName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)`
+	regex := regexp.MustCompile(regexStr)
+	matches := regex.FindStringSubmatch(req.URL.EscapedPath())
+	if matches == nil || len(matches) < 3 {
+		return nil, fmt.Errorf("failed to parse path %s", req.URL.Path)
+	}
+	body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req)
+	if err != nil {
+		return nil, err
+	}
+	resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")])
+	if err != nil {
+		return nil, err
+	}
+	virtualHubNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualHubName")])
+	if err != nil {
+		return nil, err
+	}
+	respr, errRespr := v.srv.UpdateTags(req.Context(), resourceGroupNameParam, virtualHubNameParam, body, nil)
+	if respErr := server.GetError(errRespr, req); respErr != nil {
+		return nil, respErr
+	}
+	respContent := server.GetResponseContent(respr)
+	if !contains([]int{http.StatusOK}, respContent.HTTPStatus) {
+		return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualHub, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualnetworkgatewayconnections_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualnetworkgatewayconnections_server.go new file mode 100644 index 00000000000..4c577d5a7c7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualnetworkgatewayconnections_server.go @@ -0,0 +1,686 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "reflect" + "regexp" +) + +// VirtualNetworkGatewayConnectionsServer is a fake server for instances of the armnetwork.VirtualNetworkGatewayConnectionsClient type. +type VirtualNetworkGatewayConnectionsServer struct { + // BeginCreateOrUpdate is the fake for method VirtualNetworkGatewayConnectionsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters armnetwork.VirtualNetworkGatewayConnection, options *armnetwork.VirtualNetworkGatewayConnectionsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VirtualNetworkGatewayConnectionsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, options *armnetwork.VirtualNetworkGatewayConnectionsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VirtualNetworkGatewayConnectionsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, options *armnetwork.VirtualNetworkGatewayConnectionsClientGetOptions) (resp azfake.Responder[armnetwork.VirtualNetworkGatewayConnectionsClientGetResponse], errResp azfake.ErrorResponder) + + // BeginGetIkeSas is the fake for method VirtualNetworkGatewayConnectionsClient.BeginGetIkeSas + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGetIkeSas func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, options 
*armnetwork.VirtualNetworkGatewayConnectionsClientBeginGetIkeSasOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientGetIkeSasResponse], errResp azfake.ErrorResponder) + + // GetSharedKey is the fake for method VirtualNetworkGatewayConnectionsClient.GetSharedKey + // HTTP status codes to indicate success: http.StatusOK + GetSharedKey func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, options *armnetwork.VirtualNetworkGatewayConnectionsClientGetSharedKeyOptions) (resp azfake.Responder[armnetwork.VirtualNetworkGatewayConnectionsClientGetSharedKeyResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method VirtualNetworkGatewayConnectionsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armnetwork.VirtualNetworkGatewayConnectionsClientListOptions) (resp azfake.PagerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientListResponse]) + + // BeginResetConnection is the fake for method VirtualNetworkGatewayConnectionsClient.BeginResetConnection + // HTTP status codes to indicate success: http.StatusAccepted + BeginResetConnection func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, options *armnetwork.VirtualNetworkGatewayConnectionsClientBeginResetConnectionOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientResetConnectionResponse], errResp azfake.ErrorResponder) + + // BeginResetSharedKey is the fake for method VirtualNetworkGatewayConnectionsClient.BeginResetSharedKey + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginResetSharedKey func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters armnetwork.ConnectionResetSharedKey, options *armnetwork.VirtualNetworkGatewayConnectionsClientBeginResetSharedKeyOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientResetSharedKeyResponse], errResp azfake.ErrorResponder) + + // BeginSetSharedKey is the fake for method VirtualNetworkGatewayConnectionsClient.BeginSetSharedKey + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginSetSharedKey func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters armnetwork.ConnectionSharedKey, options *armnetwork.VirtualNetworkGatewayConnectionsClientBeginSetSharedKeyOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientSetSharedKeyResponse], errResp azfake.ErrorResponder) + + // BeginStartPacketCapture is the fake for method VirtualNetworkGatewayConnectionsClient.BeginStartPacketCapture + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginStartPacketCapture func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, options *armnetwork.VirtualNetworkGatewayConnectionsClientBeginStartPacketCaptureOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientStartPacketCaptureResponse], errResp azfake.ErrorResponder) + + // BeginStopPacketCapture is the fake for method VirtualNetworkGatewayConnectionsClient.BeginStopPacketCapture + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginStopPacketCapture func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters 
armnetwork.VPNPacketCaptureStopParameters, options *armnetwork.VirtualNetworkGatewayConnectionsClientBeginStopPacketCaptureOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientStopPacketCaptureResponse], errResp azfake.ErrorResponder) + + // BeginUpdateTags is the fake for method VirtualNetworkGatewayConnectionsClient.BeginUpdateTags + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginUpdateTags func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters armnetwork.TagsObject, options *armnetwork.VirtualNetworkGatewayConnectionsClientBeginUpdateTagsOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewVirtualNetworkGatewayConnectionsServerTransport creates a new instance of VirtualNetworkGatewayConnectionsServerTransport with the provided implementation. +// The returned VirtualNetworkGatewayConnectionsServerTransport instance is connected to an instance of armnetwork.VirtualNetworkGatewayConnectionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVirtualNetworkGatewayConnectionsServerTransport(srv *VirtualNetworkGatewayConnectionsServer) *VirtualNetworkGatewayConnectionsServerTransport { + return &VirtualNetworkGatewayConnectionsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientDeleteResponse]](), + beginGetIkeSas: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientGetIkeSasResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientListResponse]](), + beginResetConnection: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientResetConnectionResponse]](), + beginResetSharedKey: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientResetSharedKeyResponse]](), + beginSetSharedKey: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientSetSharedKeyResponse]](), + beginStartPacketCapture: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientStartPacketCaptureResponse]](), + beginStopPacketCapture: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientStopPacketCaptureResponse]](), + beginUpdateTags: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientUpdateTagsResponse]](), + } +} + +// VirtualNetworkGatewayConnectionsServerTransport connects instances of armnetwork.VirtualNetworkGatewayConnectionsClient to instances of VirtualNetworkGatewayConnectionsServer. +// Don't use this type directly, use NewVirtualNetworkGatewayConnectionsServerTransport instead. 
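The same Transporter wiring applies here, via fake.NewVirtualNetworkGatewayConnectionsServerTransport. Below is a hedged sketch of how a test might script this fake's long-running and error paths, assuming the PollerResponder and ErrorResponder helpers from azcore/fake (AddNonTerminalResponse, SetTerminalResponse, SetResponseError) behave as in their published examples; the resource names, status codes, and error code are invented and nothing here is part of the generated file.

package fakedemo // illustrative sketch only

import (
	"context"
	"net/http"

	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

// newConnectionsFake scripts two behaviours: BeginDelete polls once (202) before
// completing (200), and Get reports the connection as missing via an ARM-style
// error code. Pass fake.NewVirtualNetworkGatewayConnectionsServerTransport(&srv)
// as the Transporter when constructing the real client, as in the previous sketch.
func newConnectionsFake() fake.VirtualNetworkGatewayConnectionsServer {
	return fake.VirtualNetworkGatewayConnectionsServer{
		BeginDelete: func(ctx context.Context, resourceGroupName, connName string, options *armnetwork.VirtualNetworkGatewayConnectionsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientDeleteResponse], errResp azfake.ErrorResponder) {
			resp.AddNonTerminalResponse(http.StatusAccepted, nil) // first poll: still running
			resp.SetTerminalResponse(http.StatusOK, armnetwork.VirtualNetworkGatewayConnectionsClientDeleteResponse{}, nil)
			return
		},
		Get: func(ctx context.Context, resourceGroupName, connName string, options *armnetwork.VirtualNetworkGatewayConnectionsClientGetOptions) (resp azfake.Responder[armnetwork.VirtualNetworkGatewayConnectionsClientGetResponse], errResp azfake.ErrorResponder) {
			errResp.SetResponseError(http.StatusNotFound, "ResourceNotFound")
			return
		},
	}
}

The 202-then-200 sequence satisfies dispatchBeginDelete's status-code check on this transport, which accepts http.StatusOK, http.StatusAccepted, and http.StatusNoContent.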
+type VirtualNetworkGatewayConnectionsServerTransport struct { + srv *VirtualNetworkGatewayConnectionsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientDeleteResponse]] + beginGetIkeSas *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientGetIkeSasResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientListResponse]] + beginResetConnection *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientResetConnectionResponse]] + beginResetSharedKey *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientResetSharedKeyResponse]] + beginSetSharedKey *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientSetSharedKeyResponse]] + beginStartPacketCapture *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientStartPacketCaptureResponse]] + beginStopPacketCapture *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientStopPacketCaptureResponse]] + beginUpdateTags *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewayConnectionsClientUpdateTagsResponse]] +} + +// Do implements the policy.Transporter interface for VirtualNetworkGatewayConnectionsServerTransport. +func (v *VirtualNetworkGatewayConnectionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualNetworkGatewayConnectionsClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VirtualNetworkGatewayConnectionsClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VirtualNetworkGatewayConnectionsClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualNetworkGatewayConnectionsClient.BeginGetIkeSas": + resp, err = v.dispatchBeginGetIkeSas(req) + case "VirtualNetworkGatewayConnectionsClient.GetSharedKey": + resp, err = v.dispatchGetSharedKey(req) + case "VirtualNetworkGatewayConnectionsClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + case "VirtualNetworkGatewayConnectionsClient.BeginResetConnection": + resp, err = v.dispatchBeginResetConnection(req) + case "VirtualNetworkGatewayConnectionsClient.BeginResetSharedKey": + resp, err = v.dispatchBeginResetSharedKey(req) + case "VirtualNetworkGatewayConnectionsClient.BeginSetSharedKey": + resp, err = v.dispatchBeginSetSharedKey(req) + case "VirtualNetworkGatewayConnectionsClient.BeginStartPacketCapture": + resp, err = v.dispatchBeginStartPacketCapture(req) + case "VirtualNetworkGatewayConnectionsClient.BeginStopPacketCapture": + resp, err = v.dispatchBeginStopPacketCapture(req) + case "VirtualNetworkGatewayConnectionsClient.BeginUpdateTags": + resp, err = v.dispatchBeginUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualNetworkGatewayConnectionsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method 
BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/connections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VirtualNetworkGatewayConnection](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, virtualNetworkGatewayConnectionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkGatewayConnectionsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/connections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, virtualNetworkGatewayConnectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkGatewayConnectionsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/connections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, virtualNetworkGatewayConnectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualNetworkGatewayConnection, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualNetworkGatewayConnectionsServerTransport) dispatchBeginGetIkeSas(req *http.Request) (*http.Response, error) { + if v.srv.BeginGetIkeSas == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGetIkeSas not implemented")} + } + beginGetIkeSas := v.beginGetIkeSas.get(req) + if beginGetIkeSas == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/connections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/getikesas` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginGetIkeSas(req.Context(), resourceGroupNameParam, virtualNetworkGatewayConnectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGetIkeSas = &respr + v.beginGetIkeSas.add(req, beginGetIkeSas) + } + + resp, err := server.PollerResponderNext(beginGetIkeSas, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginGetIkeSas.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGetIkeSas) { + v.beginGetIkeSas.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkGatewayConnectionsServerTransport) dispatchGetSharedKey(req *http.Request) (*http.Response, error) { + if v.srv.GetSharedKey == nil { + return nil, &nonRetriableError{errors.New("fake for method GetSharedKey not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/connections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/sharedkey` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.GetSharedKey(req.Context(), resourceGroupNameParam, virtualNetworkGatewayConnectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).ConnectionSharedKey, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualNetworkGatewayConnectionsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := v.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/connections` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + v.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.VirtualNetworkGatewayConnectionsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + v.newListPager.remove(req) + } + return resp, nil +} + +func (v *VirtualNetworkGatewayConnectionsServerTransport) dispatchBeginResetConnection(req *http.Request) (*http.Response, error) { + if v.srv.BeginResetConnection == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginResetConnection not implemented")} + } + beginResetConnection := v.beginResetConnection.get(req) + if beginResetConnection == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/connections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resetconnection` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginResetConnection(req.Context(), resourceGroupNameParam, virtualNetworkGatewayConnectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginResetConnection = &respr + v.beginResetConnection.add(req, beginResetConnection) + } + + resp, err := server.PollerResponderNext(beginResetConnection, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusAccepted}, resp.StatusCode) { + v.beginResetConnection.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusAccepted", resp.StatusCode)}
+	}
+	if !server.PollerResponderMore(beginResetConnection) {
+		v.beginResetConnection.remove(req)
+	}
+
+	return resp, nil
+}
+
+func (v *VirtualNetworkGatewayConnectionsServerTransport) dispatchBeginResetSharedKey(req *http.Request) (*http.Response, error) {
+	if v.srv.BeginResetSharedKey == nil {
+		return nil, &nonRetriableError{errors.New("fake for method BeginResetSharedKey not implemented")}
+	}
+	beginResetSharedKey := v.beginResetSharedKey.get(req)
+	if beginResetSharedKey == nil {
+		const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/connections/(?P<virtualNetworkGatewayConnectionName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/sharedkey/reset`
+		regex := regexp.MustCompile(regexStr)
+		matches := regex.FindStringSubmatch(req.URL.EscapedPath())
+		if matches == nil || len(matches) < 3 {
+			return nil, fmt.Errorf("failed to parse path %s", req.URL.Path)
+		}
+		body, err := server.UnmarshalRequestAsJSON[armnetwork.ConnectionResetSharedKey](req)
+		if err != nil {
+			return nil, err
+		}
+		resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")])
+		if err != nil {
+			return nil, err
+		}
+		virtualNetworkGatewayConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayConnectionName")])
+		if err != nil {
+			return nil, err
+		}
+		respr, errRespr := v.srv.BeginResetSharedKey(req.Context(), resourceGroupNameParam, virtualNetworkGatewayConnectionNameParam, body, nil)
+		if respErr := server.GetError(errRespr, req); respErr != nil {
+			return nil, respErr
+		}
+		beginResetSharedKey = &respr
+		v.beginResetSharedKey.add(req, beginResetSharedKey)
+	}
+
+	resp, err := server.PollerResponderNext(beginResetSharedKey, req)
+	if err != nil {
+		return nil, err
+	}
+
+	if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) {
+		v.beginResetSharedKey.remove(req)
+		return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginResetSharedKey) { + v.beginResetSharedKey.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkGatewayConnectionsServerTransport) dispatchBeginSetSharedKey(req *http.Request) (*http.Response, error) { + if v.srv.BeginSetSharedKey == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginSetSharedKey not implemented")} + } + beginSetSharedKey := v.beginSetSharedKey.get(req) + if beginSetSharedKey == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/connections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/sharedkey` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.ConnectionSharedKey](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginSetSharedKey(req.Context(), resourceGroupNameParam, virtualNetworkGatewayConnectionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginSetSharedKey = &respr + v.beginSetSharedKey.add(req, beginSetSharedKey) + } + + resp, err := server.PollerResponderNext(beginSetSharedKey, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginSetSharedKey.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginSetSharedKey) { + v.beginSetSharedKey.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkGatewayConnectionsServerTransport) dispatchBeginStartPacketCapture(req *http.Request) (*http.Response, error) { + if v.srv.BeginStartPacketCapture == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginStartPacketCapture not implemented")} + } + beginStartPacketCapture := v.beginStartPacketCapture.get(req) + if beginStartPacketCapture == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/connections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/startPacketCapture` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VPNPacketCaptureStartParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayConnectionName")]) + if err != nil { + return nil, err + } + var options *armnetwork.VirtualNetworkGatewayConnectionsClientBeginStartPacketCaptureOptions + if !reflect.ValueOf(body).IsZero() { + options = &armnetwork.VirtualNetworkGatewayConnectionsClientBeginStartPacketCaptureOptions{ + Parameters: &body, + } + } + respr, errRespr := v.srv.BeginStartPacketCapture(req.Context(), resourceGroupNameParam, virtualNetworkGatewayConnectionNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginStartPacketCapture = &respr + v.beginStartPacketCapture.add(req, beginStartPacketCapture) + } + + resp, err := server.PollerResponderNext(beginStartPacketCapture, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginStartPacketCapture.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginStartPacketCapture) { + v.beginStartPacketCapture.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkGatewayConnectionsServerTransport) dispatchBeginStopPacketCapture(req *http.Request) (*http.Response, error) { + if v.srv.BeginStopPacketCapture == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginStopPacketCapture not implemented")} + } + beginStopPacketCapture := v.beginStopPacketCapture.get(req) + if beginStopPacketCapture == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/connections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/stopPacketCapture` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VPNPacketCaptureStopParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginStopPacketCapture(req.Context(), resourceGroupNameParam, virtualNetworkGatewayConnectionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginStopPacketCapture = &respr + v.beginStopPacketCapture.add(req, beginStopPacketCapture) + } + + resp, err := server.PollerResponderNext(beginStopPacketCapture, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginStopPacketCapture.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginStopPacketCapture) { + v.beginStopPacketCapture.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkGatewayConnectionsServerTransport) dispatchBeginUpdateTags(req *http.Request) (*http.Response, error) { + if v.srv.BeginUpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdateTags not implemented")} + } + beginUpdateTags := v.beginUpdateTags.get(req) + if beginUpdateTags == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/connections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginUpdateTags(req.Context(), resourceGroupNameParam, virtualNetworkGatewayConnectionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdateTags = &respr + v.beginUpdateTags.add(req, beginUpdateTags) + } + + resp, err := server.PollerResponderNext(beginUpdateTags, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginUpdateTags.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdateTags) { + v.beginUpdateTags.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualnetworkgatewaynatrules_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualnetworkgatewaynatrules_server.go new file mode 100644 index 00000000000..3082cd33a0e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualnetworkgatewaynatrules_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// VirtualNetworkGatewayNatRulesServer is a fake server for instances of the armnetwork.VirtualNetworkGatewayNatRulesClient type. +type VirtualNetworkGatewayNatRulesServer struct { + // BeginCreateOrUpdate is the fake for method VirtualNetworkGatewayNatRulesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, natRuleName string, natRuleParameters armnetwork.VirtualNetworkGatewayNatRule, options *armnetwork.VirtualNetworkGatewayNatRulesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewayNatRulesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VirtualNetworkGatewayNatRulesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, natRuleName string, options *armnetwork.VirtualNetworkGatewayNatRulesClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewayNatRulesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VirtualNetworkGatewayNatRulesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, natRuleName string, options *armnetwork.VirtualNetworkGatewayNatRulesClientGetOptions) (resp azfake.Responder[armnetwork.VirtualNetworkGatewayNatRulesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListByVirtualNetworkGatewayPager is the fake for method VirtualNetworkGatewayNatRulesClient.NewListByVirtualNetworkGatewayPager + // HTTP status codes to indicate success: http.StatusOK + NewListByVirtualNetworkGatewayPager func(resourceGroupName string, virtualNetworkGatewayName string, options *armnetwork.VirtualNetworkGatewayNatRulesClientListByVirtualNetworkGatewayOptions) (resp azfake.PagerResponder[armnetwork.VirtualNetworkGatewayNatRulesClientListByVirtualNetworkGatewayResponse]) +} + +// NewVirtualNetworkGatewayNatRulesServerTransport creates a new instance of VirtualNetworkGatewayNatRulesServerTransport with the provided implementation. +// The returned VirtualNetworkGatewayNatRulesServerTransport instance is connected to an instance of armnetwork.VirtualNetworkGatewayNatRulesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
+func NewVirtualNetworkGatewayNatRulesServerTransport(srv *VirtualNetworkGatewayNatRulesServer) *VirtualNetworkGatewayNatRulesServerTransport {
+	return &VirtualNetworkGatewayNatRulesServerTransport{
+		srv: srv,
+		beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewayNatRulesClientCreateOrUpdateResponse]](),
+		beginDelete: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewayNatRulesClientDeleteResponse]](),
+		newListByVirtualNetworkGatewayPager: newTracker[azfake.PagerResponder[armnetwork.VirtualNetworkGatewayNatRulesClientListByVirtualNetworkGatewayResponse]](),
+	}
+}
+
+// VirtualNetworkGatewayNatRulesServerTransport connects instances of armnetwork.VirtualNetworkGatewayNatRulesClient to instances of VirtualNetworkGatewayNatRulesServer.
+// Don't use this type directly, use NewVirtualNetworkGatewayNatRulesServerTransport instead.
+type VirtualNetworkGatewayNatRulesServerTransport struct {
+	srv *VirtualNetworkGatewayNatRulesServer
+	beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewayNatRulesClientCreateOrUpdateResponse]]
+	beginDelete *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewayNatRulesClientDeleteResponse]]
+	newListByVirtualNetworkGatewayPager *tracker[azfake.PagerResponder[armnetwork.VirtualNetworkGatewayNatRulesClientListByVirtualNetworkGatewayResponse]]
+}
+
+// Do implements the policy.Transporter interface for VirtualNetworkGatewayNatRulesServerTransport.
+func (v *VirtualNetworkGatewayNatRulesServerTransport) Do(req *http.Request) (*http.Response, error) {
+	rawMethod := req.Context().Value(runtime.CtxAPINameKey{})
+	method, ok := rawMethod.(string)
+	if !ok {
+		return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")}
+	}
+
+	var resp *http.Response
+	var err error
+
+	switch method {
+	case "VirtualNetworkGatewayNatRulesClient.BeginCreateOrUpdate":
+		resp, err = v.dispatchBeginCreateOrUpdate(req)
+	case "VirtualNetworkGatewayNatRulesClient.BeginDelete":
+		resp, err = v.dispatchBeginDelete(req)
+	case "VirtualNetworkGatewayNatRulesClient.Get":
+		resp, err = v.dispatchGet(req)
+	case "VirtualNetworkGatewayNatRulesClient.NewListByVirtualNetworkGatewayPager":
+		resp, err = v.dispatchNewListByVirtualNetworkGatewayPager(req)
+	default:
+		err = fmt.Errorf("unhandled API %s", method)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return resp, nil
+}
+
+func (v *VirtualNetworkGatewayNatRulesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) {
+	if v.srv.BeginCreateOrUpdate == nil {
+		return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")}
+	}
+	beginCreateOrUpdate := v.beginCreateOrUpdate.get(req)
+	if beginCreateOrUpdate == nil {
+		const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways/(?P<virtualNetworkGatewayName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/natRules/(?P<natRuleName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)`
+		regex := regexp.MustCompile(regexStr)
+		matches := regex.FindStringSubmatch(req.URL.EscapedPath())
+		if matches == nil || len(matches) < 4 {
+			return nil, fmt.Errorf("failed to parse path %s", req.URL.Path)
+		}
+		body, err := server.UnmarshalRequestAsJSON[armnetwork.VirtualNetworkGatewayNatRule](req)
+		if err != nil {
+			return nil, err
+		}
+		resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")])
+		if err != nil {
+			return nil, err
+		}
+		virtualNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayName")])
+		if err != nil {
+			return nil, err
+		}
+		natRuleNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("natRuleName")])
+		if err != nil {
+			return nil, err
+		}
+		respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, virtualNetworkGatewayNameParam, natRuleNameParam, body, nil)
+		if respErr := server.GetError(errRespr, req); respErr != nil {
+			return nil, respErr
+		}
+		beginCreateOrUpdate = &respr
+		v.beginCreateOrUpdate.add(req, beginCreateOrUpdate)
+	}
+
+	resp, err := server.PollerResponderNext(beginCreateOrUpdate, req)
+	if err != nil {
+		return nil, err
+	}
+
+	if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) {
+		v.beginCreateOrUpdate.remove(req)
+		return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)}
+	}
+	if !server.PollerResponderMore(beginCreateOrUpdate) {
+		v.beginCreateOrUpdate.remove(req)
+	}
+
+	return resp, nil
+}
+
+func (v *VirtualNetworkGatewayNatRulesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) {
+	if v.srv.BeginDelete == nil {
+		return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")}
+	}
+	beginDelete := v.beginDelete.get(req)
+	if beginDelete == nil {
+		const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways/(?P<virtualNetworkGatewayName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/natRules/(?P<natRuleName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)`
+		regex := regexp.MustCompile(regexStr)
+		matches := regex.FindStringSubmatch(req.URL.EscapedPath())
+		if matches == nil || len(matches) < 4 {
+			return nil, fmt.Errorf("failed to parse path %s", req.URL.Path)
+		}
+		resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")])
+		if err != nil {
+			return nil, err
+		}
+		virtualNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayName")])
+		if err != nil {
+			return nil, err
+		}
+		natRuleNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("natRuleName")])
+		if err != nil {
+			return nil, err
+		}
+		respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, virtualNetworkGatewayNameParam, natRuleNameParam, nil)
+		if respErr := server.GetError(errRespr, req); respErr != nil {
+			return nil, respErr
+		}
+		beginDelete = &respr
+		v.beginDelete.add(req, beginDelete)
+	}
+
+	resp, err := server.PollerResponderNext(beginDelete, req)
+	if err != nil {
+		return nil, err
+	}
+
+	if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) {
+		v.beginDelete.remove(req)
+		return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)}
+	}
+	if !server.PollerResponderMore(beginDelete) {
+		v.beginDelete.remove(req)
+	}
+
+	return resp, nil
+}
+
+func (v *VirtualNetworkGatewayNatRulesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) {
+	if v.srv.Get == nil {
+		return nil, &nonRetriableError{errors.New("fake for method Get not implemented")}
+	}
+	const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways/(?P<virtualNetworkGatewayName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/natRules/(?P<natRuleName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)`
+	regex := regexp.MustCompile(regexStr)
+	matches := regex.FindStringSubmatch(req.URL.EscapedPath())
+	if matches == nil || len(matches) < 4 {
+		return nil, fmt.Errorf("failed to parse path %s", req.URL.Path)
+	}
+	resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")])
+	if err != nil {
+		return nil, err
+	}
+	virtualNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayName")])
+	if err != nil {
+		return nil, err
+	}
+	natRuleNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("natRuleName")])
+	if err != nil {
+		return nil, err
+	}
+	respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, virtualNetworkGatewayNameParam, natRuleNameParam, nil)
+	if respErr := server.GetError(errRespr, req); respErr != nil {
+		return nil, respErr
+	}
+	respContent := server.GetResponseContent(respr)
+	if !contains([]int{http.StatusOK}, respContent.HTTPStatus) {
+		return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)}
+	}
+	resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualNetworkGatewayNatRule, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+func (v *VirtualNetworkGatewayNatRulesServerTransport) dispatchNewListByVirtualNetworkGatewayPager(req *http.Request) (*http.Response, error) {
+	if v.srv.NewListByVirtualNetworkGatewayPager == nil {
+		return nil, &nonRetriableError{errors.New("fake for method NewListByVirtualNetworkGatewayPager not implemented")}
+	}
+	newListByVirtualNetworkGatewayPager := v.newListByVirtualNetworkGatewayPager.get(req)
+	if newListByVirtualNetworkGatewayPager == nil {
+		const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways/(?P<virtualNetworkGatewayName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/natRules`
+		regex := regexp.MustCompile(regexStr)
+		matches := regex.FindStringSubmatch(req.URL.EscapedPath())
+		if matches == nil || len(matches) < 3 {
+			return nil, fmt.Errorf("failed to parse path %s", req.URL.Path)
+		}
+		resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")])
+		if err != nil {
+			return nil, err
+		}
+		virtualNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayName")])
+		if err != nil {
+			return nil, err
+		}
+		resp := v.srv.NewListByVirtualNetworkGatewayPager(resourceGroupNameParam, virtualNetworkGatewayNameParam, nil)
+		newListByVirtualNetworkGatewayPager = &resp
+		v.newListByVirtualNetworkGatewayPager.add(req, newListByVirtualNetworkGatewayPager)
+		server.PagerResponderInjectNextLinks(newListByVirtualNetworkGatewayPager, req, func(page 
*armnetwork.VirtualNetworkGatewayNatRulesClientListByVirtualNetworkGatewayResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByVirtualNetworkGatewayPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListByVirtualNetworkGatewayPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByVirtualNetworkGatewayPager) { + v.newListByVirtualNetworkGatewayPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualnetworkgateways_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualnetworkgateways_server.go new file mode 100644 index 00000000000..64f18bce4ba --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualnetworkgateways_server.go @@ -0,0 +1,1231 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "reflect" + "regexp" +) + +// VirtualNetworkGatewaysServer is a fake server for instances of the armnetwork.VirtualNetworkGatewaysClient type. 
+type VirtualNetworkGatewaysServer struct { + // BeginCreateOrUpdate is the fake for method VirtualNetworkGatewaysClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, parameters armnetwork.VirtualNetworkGateway, options *armnetwork.VirtualNetworkGatewaysClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VirtualNetworkGatewaysClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, options *armnetwork.VirtualNetworkGatewaysClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientDeleteResponse], errResp azfake.ErrorResponder) + + // BeginDisconnectVirtualNetworkGatewayVPNConnections is the fake for method VirtualNetworkGatewaysClient.BeginDisconnectVirtualNetworkGatewayVPNConnections + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginDisconnectVirtualNetworkGatewayVPNConnections func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, request armnetwork.P2SVPNConnectionRequest, options *armnetwork.VirtualNetworkGatewaysClientBeginDisconnectVirtualNetworkGatewayVPNConnectionsOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientDisconnectVirtualNetworkGatewayVPNConnectionsResponse], errResp azfake.ErrorResponder) + + // BeginGenerateVPNProfile is the fake for method VirtualNetworkGatewaysClient.BeginGenerateVPNProfile + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGenerateVPNProfile func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, parameters armnetwork.VPNClientParameters, options *armnetwork.VirtualNetworkGatewaysClientBeginGenerateVPNProfileOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientGenerateVPNProfileResponse], errResp azfake.ErrorResponder) + + // BeginGeneratevpnclientpackage is the fake for method VirtualNetworkGatewaysClient.BeginGeneratevpnclientpackage + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGeneratevpnclientpackage func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, parameters armnetwork.VPNClientParameters, options *armnetwork.VirtualNetworkGatewaysClientBeginGeneratevpnclientpackageOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientGeneratevpnclientpackageResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VirtualNetworkGatewaysClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, options *armnetwork.VirtualNetworkGatewaysClientGetOptions) (resp azfake.Responder[armnetwork.VirtualNetworkGatewaysClientGetResponse], errResp azfake.ErrorResponder) + + // BeginGetAdvertisedRoutes is the fake for method VirtualNetworkGatewaysClient.BeginGetAdvertisedRoutes + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGetAdvertisedRoutes func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, peer string, 
options *armnetwork.VirtualNetworkGatewaysClientBeginGetAdvertisedRoutesOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientGetAdvertisedRoutesResponse], errResp azfake.ErrorResponder) + + // BeginGetBgpPeerStatus is the fake for method VirtualNetworkGatewaysClient.BeginGetBgpPeerStatus + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGetBgpPeerStatus func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, options *armnetwork.VirtualNetworkGatewaysClientBeginGetBgpPeerStatusOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientGetBgpPeerStatusResponse], errResp azfake.ErrorResponder) + + // BeginGetLearnedRoutes is the fake for method VirtualNetworkGatewaysClient.BeginGetLearnedRoutes + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGetLearnedRoutes func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, options *armnetwork.VirtualNetworkGatewaysClientBeginGetLearnedRoutesOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientGetLearnedRoutesResponse], errResp azfake.ErrorResponder) + + // BeginGetVPNProfilePackageURL is the fake for method VirtualNetworkGatewaysClient.BeginGetVPNProfilePackageURL + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGetVPNProfilePackageURL func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, options *armnetwork.VirtualNetworkGatewaysClientBeginGetVPNProfilePackageURLOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientGetVPNProfilePackageURLResponse], errResp azfake.ErrorResponder) + + // BeginGetVpnclientConnectionHealth is the fake for method VirtualNetworkGatewaysClient.BeginGetVpnclientConnectionHealth + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGetVpnclientConnectionHealth func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, options *armnetwork.VirtualNetworkGatewaysClientBeginGetVpnclientConnectionHealthOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientGetVpnclientConnectionHealthResponse], errResp azfake.ErrorResponder) + + // BeginGetVpnclientIPSecParameters is the fake for method VirtualNetworkGatewaysClient.BeginGetVpnclientIPSecParameters + // HTTP status codes to indicate success: http.StatusOK + BeginGetVpnclientIPSecParameters func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, options *armnetwork.VirtualNetworkGatewaysClientBeginGetVpnclientIPSecParametersOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientGetVpnclientIPSecParametersResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method VirtualNetworkGatewaysClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armnetwork.VirtualNetworkGatewaysClientListOptions) (resp azfake.PagerResponder[armnetwork.VirtualNetworkGatewaysClientListResponse]) + + // NewListConnectionsPager is the fake for method VirtualNetworkGatewaysClient.NewListConnectionsPager + // HTTP status codes to indicate success: http.StatusOK + NewListConnectionsPager func(resourceGroupName string, virtualNetworkGatewayName string, options *armnetwork.VirtualNetworkGatewaysClientListConnectionsOptions) (resp 
azfake.PagerResponder[armnetwork.VirtualNetworkGatewaysClientListConnectionsResponse]) + + // BeginReset is the fake for method VirtualNetworkGatewaysClient.BeginReset + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginReset func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, options *armnetwork.VirtualNetworkGatewaysClientBeginResetOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientResetResponse], errResp azfake.ErrorResponder) + + // BeginResetVPNClientSharedKey is the fake for method VirtualNetworkGatewaysClient.BeginResetVPNClientSharedKey + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginResetVPNClientSharedKey func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, options *armnetwork.VirtualNetworkGatewaysClientBeginResetVPNClientSharedKeyOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientResetVPNClientSharedKeyResponse], errResp azfake.ErrorResponder) + + // BeginSetVpnclientIPSecParameters is the fake for method VirtualNetworkGatewaysClient.BeginSetVpnclientIPSecParameters + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginSetVpnclientIPSecParameters func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, vpnclientIPSecParams armnetwork.VPNClientIPsecParameters, options *armnetwork.VirtualNetworkGatewaysClientBeginSetVpnclientIPSecParametersOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientSetVpnclientIPSecParametersResponse], errResp azfake.ErrorResponder) + + // BeginStartPacketCapture is the fake for method VirtualNetworkGatewaysClient.BeginStartPacketCapture + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginStartPacketCapture func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, options *armnetwork.VirtualNetworkGatewaysClientBeginStartPacketCaptureOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientStartPacketCaptureResponse], errResp azfake.ErrorResponder) + + // BeginStopPacketCapture is the fake for method VirtualNetworkGatewaysClient.BeginStopPacketCapture + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginStopPacketCapture func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, parameters armnetwork.VPNPacketCaptureStopParameters, options *armnetwork.VirtualNetworkGatewaysClientBeginStopPacketCaptureOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientStopPacketCaptureResponse], errResp azfake.ErrorResponder) + + // SupportedVPNDevices is the fake for method VirtualNetworkGatewaysClient.SupportedVPNDevices + // HTTP status codes to indicate success: http.StatusOK + SupportedVPNDevices func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, options *armnetwork.VirtualNetworkGatewaysClientSupportedVPNDevicesOptions) (resp azfake.Responder[armnetwork.VirtualNetworkGatewaysClientSupportedVPNDevicesResponse], errResp azfake.ErrorResponder) + + // BeginUpdateTags is the fake for method VirtualNetworkGatewaysClient.BeginUpdateTags + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginUpdateTags func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, parameters armnetwork.TagsObject, options 
*armnetwork.VirtualNetworkGatewaysClientBeginUpdateTagsOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientUpdateTagsResponse], errResp azfake.ErrorResponder) + + // VPNDeviceConfigurationScript is the fake for method VirtualNetworkGatewaysClient.VPNDeviceConfigurationScript + // HTTP status codes to indicate success: http.StatusOK + VPNDeviceConfigurationScript func(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters armnetwork.VPNDeviceScriptParameters, options *armnetwork.VirtualNetworkGatewaysClientVPNDeviceConfigurationScriptOptions) (resp azfake.Responder[armnetwork.VirtualNetworkGatewaysClientVPNDeviceConfigurationScriptResponse], errResp azfake.ErrorResponder) +} + +// NewVirtualNetworkGatewaysServerTransport creates a new instance of VirtualNetworkGatewaysServerTransport with the provided implementation. +// The returned VirtualNetworkGatewaysServerTransport instance is connected to an instance of armnetwork.VirtualNetworkGatewaysClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVirtualNetworkGatewaysServerTransport(srv *VirtualNetworkGatewaysServer) *VirtualNetworkGatewaysServerTransport { + return &VirtualNetworkGatewaysServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientDeleteResponse]](), + beginDisconnectVirtualNetworkGatewayVPNConnections: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientDisconnectVirtualNetworkGatewayVPNConnectionsResponse]](), + beginGenerateVPNProfile: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientGenerateVPNProfileResponse]](), + beginGeneratevpnclientpackage: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientGeneratevpnclientpackageResponse]](), + beginGetAdvertisedRoutes: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientGetAdvertisedRoutesResponse]](), + beginGetBgpPeerStatus: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientGetBgpPeerStatusResponse]](), + beginGetLearnedRoutes: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientGetLearnedRoutesResponse]](), + beginGetVPNProfilePackageURL: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientGetVPNProfilePackageURLResponse]](), + beginGetVpnclientConnectionHealth: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientGetVpnclientConnectionHealthResponse]](), + beginGetVpnclientIPSecParameters: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientGetVpnclientIPSecParametersResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.VirtualNetworkGatewaysClientListResponse]](), + newListConnectionsPager: newTracker[azfake.PagerResponder[armnetwork.VirtualNetworkGatewaysClientListConnectionsResponse]](), + beginReset: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientResetResponse]](), + beginResetVPNClientSharedKey: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientResetVPNClientSharedKeyResponse]](), + beginSetVpnclientIPSecParameters: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientSetVpnclientIPSecParametersResponse]](), + beginStartPacketCapture: 
newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientStartPacketCaptureResponse]](), + beginStopPacketCapture: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientStopPacketCaptureResponse]](), + beginUpdateTags: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientUpdateTagsResponse]](), + } +} + +// VirtualNetworkGatewaysServerTransport connects instances of armnetwork.VirtualNetworkGatewaysClient to instances of VirtualNetworkGatewaysServer. +// Don't use this type directly, use NewVirtualNetworkGatewaysServerTransport instead. +type VirtualNetworkGatewaysServerTransport struct { + srv *VirtualNetworkGatewaysServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientDeleteResponse]] + beginDisconnectVirtualNetworkGatewayVPNConnections *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientDisconnectVirtualNetworkGatewayVPNConnectionsResponse]] + beginGenerateVPNProfile *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientGenerateVPNProfileResponse]] + beginGeneratevpnclientpackage *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientGeneratevpnclientpackageResponse]] + beginGetAdvertisedRoutes *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientGetAdvertisedRoutesResponse]] + beginGetBgpPeerStatus *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientGetBgpPeerStatusResponse]] + beginGetLearnedRoutes *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientGetLearnedRoutesResponse]] + beginGetVPNProfilePackageURL *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientGetVPNProfilePackageURLResponse]] + beginGetVpnclientConnectionHealth *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientGetVpnclientConnectionHealthResponse]] + beginGetVpnclientIPSecParameters *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientGetVpnclientIPSecParametersResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.VirtualNetworkGatewaysClientListResponse]] + newListConnectionsPager *tracker[azfake.PagerResponder[armnetwork.VirtualNetworkGatewaysClientListConnectionsResponse]] + beginReset *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientResetResponse]] + beginResetVPNClientSharedKey *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientResetVPNClientSharedKeyResponse]] + beginSetVpnclientIPSecParameters *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientSetVpnclientIPSecParametersResponse]] + beginStartPacketCapture *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientStartPacketCaptureResponse]] + beginStopPacketCapture *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientStopPacketCaptureResponse]] + beginUpdateTags *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkGatewaysClientUpdateTagsResponse]] +} + +// Do implements the policy.Transporter interface for VirtualNetworkGatewaysServerTransport. 
+func (v *VirtualNetworkGatewaysServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualNetworkGatewaysClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VirtualNetworkGatewaysClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VirtualNetworkGatewaysClient.BeginDisconnectVirtualNetworkGatewayVPNConnections": + resp, err = v.dispatchBeginDisconnectVirtualNetworkGatewayVPNConnections(req) + case "VirtualNetworkGatewaysClient.BeginGenerateVPNProfile": + resp, err = v.dispatchBeginGenerateVPNProfile(req) + case "VirtualNetworkGatewaysClient.BeginGeneratevpnclientpackage": + resp, err = v.dispatchBeginGeneratevpnclientpackage(req) + case "VirtualNetworkGatewaysClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualNetworkGatewaysClient.BeginGetAdvertisedRoutes": + resp, err = v.dispatchBeginGetAdvertisedRoutes(req) + case "VirtualNetworkGatewaysClient.BeginGetBgpPeerStatus": + resp, err = v.dispatchBeginGetBgpPeerStatus(req) + case "VirtualNetworkGatewaysClient.BeginGetLearnedRoutes": + resp, err = v.dispatchBeginGetLearnedRoutes(req) + case "VirtualNetworkGatewaysClient.BeginGetVPNProfilePackageURL": + resp, err = v.dispatchBeginGetVPNProfilePackageURL(req) + case "VirtualNetworkGatewaysClient.BeginGetVpnclientConnectionHealth": + resp, err = v.dispatchBeginGetVpnclientConnectionHealth(req) + case "VirtualNetworkGatewaysClient.BeginGetVpnclientIPSecParameters": + resp, err = v.dispatchBeginGetVpnclientIPSecParameters(req) + case "VirtualNetworkGatewaysClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + case "VirtualNetworkGatewaysClient.NewListConnectionsPager": + resp, err = v.dispatchNewListConnectionsPager(req) + case "VirtualNetworkGatewaysClient.BeginReset": + resp, err = v.dispatchBeginReset(req) + case "VirtualNetworkGatewaysClient.BeginResetVPNClientSharedKey": + resp, err = v.dispatchBeginResetVPNClientSharedKey(req) + case "VirtualNetworkGatewaysClient.BeginSetVpnclientIPSecParameters": + resp, err = v.dispatchBeginSetVpnclientIPSecParameters(req) + case "VirtualNetworkGatewaysClient.BeginStartPacketCapture": + resp, err = v.dispatchBeginStartPacketCapture(req) + case "VirtualNetworkGatewaysClient.BeginStopPacketCapture": + resp, err = v.dispatchBeginStopPacketCapture(req) + case "VirtualNetworkGatewaysClient.SupportedVPNDevices": + resp, err = v.dispatchSupportedVPNDevices(req) + case "VirtualNetworkGatewaysClient.BeginUpdateTags": + resp, err = v.dispatchBeginUpdateTags(req) + case "VirtualNetworkGatewaysClient.VPNDeviceConfigurationScript": + resp, err = v.dispatchVPNDeviceConfigurationScript(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualNetworkGatewaysServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = 
`/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VirtualNetworkGateway](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, virtualNetworkGatewayNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkGatewaysServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, virtualNetworkGatewayNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkGatewaysServerTransport) dispatchBeginDisconnectVirtualNetworkGatewayVPNConnections(req *http.Request) (*http.Response, error) { + if v.srv.BeginDisconnectVirtualNetworkGatewayVPNConnections == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDisconnectVirtualNetworkGatewayVPNConnections not implemented")} + } + beginDisconnectVirtualNetworkGatewayVPNConnections := v.beginDisconnectVirtualNetworkGatewayVPNConnections.get(req) + if beginDisconnectVirtualNetworkGatewayVPNConnections == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/disconnectVirtualNetworkGatewayVpnConnections` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.P2SVPNConnectionRequest](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDisconnectVirtualNetworkGatewayVPNConnections(req.Context(), resourceGroupNameParam, virtualNetworkGatewayNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDisconnectVirtualNetworkGatewayVPNConnections = &respr + v.beginDisconnectVirtualNetworkGatewayVPNConnections.add(req, beginDisconnectVirtualNetworkGatewayVPNConnections) + } + + resp, err := server.PollerResponderNext(beginDisconnectVirtualNetworkGatewayVPNConnections, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginDisconnectVirtualNetworkGatewayVPNConnections.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDisconnectVirtualNetworkGatewayVPNConnections) { + v.beginDisconnectVirtualNetworkGatewayVPNConnections.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkGatewaysServerTransport) dispatchBeginGenerateVPNProfile(req *http.Request) (*http.Response, error) { + if v.srv.BeginGenerateVPNProfile == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGenerateVPNProfile not implemented")} + } + beginGenerateVPNProfile := v.beginGenerateVPNProfile.get(req) + if beginGenerateVPNProfile == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/generatevpnprofile` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VPNClientParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginGenerateVPNProfile(req.Context(), resourceGroupNameParam, virtualNetworkGatewayNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGenerateVPNProfile = &respr + v.beginGenerateVPNProfile.add(req, beginGenerateVPNProfile) + } + + resp, err := server.PollerResponderNext(beginGenerateVPNProfile, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginGenerateVPNProfile.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGenerateVPNProfile) { + v.beginGenerateVPNProfile.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkGatewaysServerTransport) dispatchBeginGeneratevpnclientpackage(req *http.Request) (*http.Response, error) { + if v.srv.BeginGeneratevpnclientpackage == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGeneratevpnclientpackage not implemented")} + } + beginGeneratevpnclientpackage := v.beginGeneratevpnclientpackage.get(req) + if beginGeneratevpnclientpackage == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/generatevpnclientpackage` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VPNClientParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginGeneratevpnclientpackage(req.Context(), resourceGroupNameParam, virtualNetworkGatewayNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGeneratevpnclientpackage = &respr + v.beginGeneratevpnclientpackage.add(req, beginGeneratevpnclientpackage) + } + + resp, err := server.PollerResponderNext(beginGeneratevpnclientpackage, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginGeneratevpnclientpackage.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGeneratevpnclientpackage) { + v.beginGeneratevpnclientpackage.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkGatewaysServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, virtualNetworkGatewayNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualNetworkGateway, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualNetworkGatewaysServerTransport) dispatchBeginGetAdvertisedRoutes(req *http.Request) (*http.Response, error) { + if v.srv.BeginGetAdvertisedRoutes == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGetAdvertisedRoutes not implemented")} + } + beginGetAdvertisedRoutes := v.beginGetAdvertisedRoutes.get(req) + if beginGetAdvertisedRoutes == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/getAdvertisedRoutes` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayName")]) + if err != nil { + return nil, err + } + peerParam, err := url.QueryUnescape(qp.Get("peer")) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginGetAdvertisedRoutes(req.Context(), resourceGroupNameParam, virtualNetworkGatewayNameParam, peerParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGetAdvertisedRoutes = &respr + v.beginGetAdvertisedRoutes.add(req, beginGetAdvertisedRoutes) + } + + resp, err := server.PollerResponderNext(beginGetAdvertisedRoutes, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + 
v.beginGetAdvertisedRoutes.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGetAdvertisedRoutes) { + v.beginGetAdvertisedRoutes.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkGatewaysServerTransport) dispatchBeginGetBgpPeerStatus(req *http.Request) (*http.Response, error) { + if v.srv.BeginGetBgpPeerStatus == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGetBgpPeerStatus not implemented")} + } + beginGetBgpPeerStatus := v.beginGetBgpPeerStatus.get(req) + if beginGetBgpPeerStatus == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/getBgpPeerStatus` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayName")]) + if err != nil { + return nil, err + } + peerUnescaped, err := url.QueryUnescape(qp.Get("peer")) + if err != nil { + return nil, err + } + peerParam := getOptional(peerUnescaped) + var options *armnetwork.VirtualNetworkGatewaysClientBeginGetBgpPeerStatusOptions + if peerParam != nil { + options = &armnetwork.VirtualNetworkGatewaysClientBeginGetBgpPeerStatusOptions{ + Peer: peerParam, + } + } + respr, errRespr := v.srv.BeginGetBgpPeerStatus(req.Context(), resourceGroupNameParam, virtualNetworkGatewayNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGetBgpPeerStatus = &respr + v.beginGetBgpPeerStatus.add(req, beginGetBgpPeerStatus) + } + + resp, err := server.PollerResponderNext(beginGetBgpPeerStatus, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginGetBgpPeerStatus.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGetBgpPeerStatus) { + v.beginGetBgpPeerStatus.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkGatewaysServerTransport) dispatchBeginGetLearnedRoutes(req *http.Request) (*http.Response, error) { + if v.srv.BeginGetLearnedRoutes == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGetLearnedRoutes not implemented")} + } + beginGetLearnedRoutes := v.beginGetLearnedRoutes.get(req) + if beginGetLearnedRoutes == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/getLearnedRoutes` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginGetLearnedRoutes(req.Context(), resourceGroupNameParam, virtualNetworkGatewayNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGetLearnedRoutes = &respr + v.beginGetLearnedRoutes.add(req, beginGetLearnedRoutes) + } + + resp, err := server.PollerResponderNext(beginGetLearnedRoutes, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginGetLearnedRoutes.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGetLearnedRoutes) { + v.beginGetLearnedRoutes.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkGatewaysServerTransport) dispatchBeginGetVPNProfilePackageURL(req *http.Request) (*http.Response, error) { + if v.srv.BeginGetVPNProfilePackageURL == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGetVPNProfilePackageURL not implemented")} + } + beginGetVPNProfilePackageURL := v.beginGetVPNProfilePackageURL.get(req) + if beginGetVPNProfilePackageURL == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/getvpnprofilepackageurl` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginGetVPNProfilePackageURL(req.Context(), resourceGroupNameParam, virtualNetworkGatewayNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGetVPNProfilePackageURL = &respr + v.beginGetVPNProfilePackageURL.add(req, beginGetVPNProfilePackageURL) + } + + resp, err := server.PollerResponderNext(beginGetVPNProfilePackageURL, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginGetVPNProfilePackageURL.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGetVPNProfilePackageURL) { + v.beginGetVPNProfilePackageURL.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkGatewaysServerTransport) dispatchBeginGetVpnclientConnectionHealth(req *http.Request) (*http.Response, error) { + if v.srv.BeginGetVpnclientConnectionHealth == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGetVpnclientConnectionHealth not implemented")} + } + beginGetVpnclientConnectionHealth := v.beginGetVpnclientConnectionHealth.get(req) + if beginGetVpnclientConnectionHealth == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/getVpnClientConnectionHealth` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginGetVpnclientConnectionHealth(req.Context(), resourceGroupNameParam, virtualNetworkGatewayNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGetVpnclientConnectionHealth = &respr + v.beginGetVpnclientConnectionHealth.add(req, beginGetVpnclientConnectionHealth) + } + + resp, err := server.PollerResponderNext(beginGetVpnclientConnectionHealth, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginGetVpnclientConnectionHealth.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGetVpnclientConnectionHealth) { + v.beginGetVpnclientConnectionHealth.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkGatewaysServerTransport) dispatchBeginGetVpnclientIPSecParameters(req *http.Request) (*http.Response, error) { + if v.srv.BeginGetVpnclientIPSecParameters == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGetVpnclientIPSecParameters not implemented")} + } + beginGetVpnclientIPSecParameters := v.beginGetVpnclientIPSecParameters.get(req) + if beginGetVpnclientIPSecParameters == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/getvpnclientipsecparameters` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginGetVpnclientIPSecParameters(req.Context(), resourceGroupNameParam, virtualNetworkGatewayNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGetVpnclientIPSecParameters = &respr + v.beginGetVpnclientIPSecParameters.add(req, beginGetVpnclientIPSecParameters) + } + + resp, err := server.PollerResponderNext(beginGetVpnclientIPSecParameters, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.beginGetVpnclientIPSecParameters.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGetVpnclientIPSecParameters) { + v.beginGetVpnclientIPSecParameters.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkGatewaysServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := v.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + v.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.VirtualNetworkGatewaysClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + v.newListPager.remove(req) + } + return resp, nil +} + +func (v *VirtualNetworkGatewaysServerTransport) dispatchNewListConnectionsPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListConnectionsPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListConnectionsPager not implemented")} + } + newListConnectionsPager := v.newListConnectionsPager.get(req) + if newListConnectionsPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/connections` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListConnectionsPager(resourceGroupNameParam, virtualNetworkGatewayNameParam, nil) + newListConnectionsPager = &resp + v.newListConnectionsPager.add(req, newListConnectionsPager) + server.PagerResponderInjectNextLinks(newListConnectionsPager, req, func(page *armnetwork.VirtualNetworkGatewaysClientListConnectionsResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListConnectionsPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListConnectionsPager.remove(req) + return nil, 
&nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListConnectionsPager) { + v.newListConnectionsPager.remove(req) + } + return resp, nil +} + +func (v *VirtualNetworkGatewaysServerTransport) dispatchBeginReset(req *http.Request) (*http.Response, error) { + if v.srv.BeginReset == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginReset not implemented")} + } + beginReset := v.beginReset.get(req) + if beginReset == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/reset` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayName")]) + if err != nil { + return nil, err + } + gatewayVipUnescaped, err := url.QueryUnescape(qp.Get("gatewayVip")) + if err != nil { + return nil, err + } + gatewayVipParam := getOptional(gatewayVipUnescaped) + var options *armnetwork.VirtualNetworkGatewaysClientBeginResetOptions + if gatewayVipParam != nil { + options = &armnetwork.VirtualNetworkGatewaysClientBeginResetOptions{ + GatewayVip: gatewayVipParam, + } + } + respr, errRespr := v.srv.BeginReset(req.Context(), resourceGroupNameParam, virtualNetworkGatewayNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginReset = &respr + v.beginReset.add(req, beginReset) + } + + resp, err := server.PollerResponderNext(beginReset, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginReset.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginReset) { + v.beginReset.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkGatewaysServerTransport) dispatchBeginResetVPNClientSharedKey(req *http.Request) (*http.Response, error) { + if v.srv.BeginResetVPNClientSharedKey == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginResetVPNClientSharedKey not implemented")} + } + beginResetVPNClientSharedKey := v.beginResetVPNClientSharedKey.get(req) + if beginResetVPNClientSharedKey == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resetvpnclientsharedkey` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginResetVPNClientSharedKey(req.Context(), resourceGroupNameParam, virtualNetworkGatewayNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginResetVPNClientSharedKey = &respr + v.beginResetVPNClientSharedKey.add(req, beginResetVPNClientSharedKey) + } + + resp, err := server.PollerResponderNext(beginResetVPNClientSharedKey, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginResetVPNClientSharedKey.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginResetVPNClientSharedKey) { + v.beginResetVPNClientSharedKey.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkGatewaysServerTransport) dispatchBeginSetVpnclientIPSecParameters(req *http.Request) (*http.Response, error) { + if v.srv.BeginSetVpnclientIPSecParameters == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginSetVpnclientIPSecParameters not implemented")} + } + beginSetVpnclientIPSecParameters := v.beginSetVpnclientIPSecParameters.get(req) + if beginSetVpnclientIPSecParameters == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/setvpnclientipsecparameters` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VPNClientIPsecParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginSetVpnclientIPSecParameters(req.Context(), resourceGroupNameParam, virtualNetworkGatewayNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginSetVpnclientIPSecParameters = &respr + v.beginSetVpnclientIPSecParameters.add(req, beginSetVpnclientIPSecParameters) + } + + resp, err := server.PollerResponderNext(beginSetVpnclientIPSecParameters, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginSetVpnclientIPSecParameters.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginSetVpnclientIPSecParameters) { + v.beginSetVpnclientIPSecParameters.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkGatewaysServerTransport) dispatchBeginStartPacketCapture(req *http.Request) (*http.Response, error) { + if v.srv.BeginStartPacketCapture == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginStartPacketCapture not implemented")} + } + beginStartPacketCapture := v.beginStartPacketCapture.get(req) + if beginStartPacketCapture == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/startPacketCapture` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VPNPacketCaptureStartParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayName")]) + if err != nil { + return nil, err + } + var options *armnetwork.VirtualNetworkGatewaysClientBeginStartPacketCaptureOptions + if !reflect.ValueOf(body).IsZero() { + options = &armnetwork.VirtualNetworkGatewaysClientBeginStartPacketCaptureOptions{ + Parameters: &body, + } + } + respr, errRespr := v.srv.BeginStartPacketCapture(req.Context(), resourceGroupNameParam, virtualNetworkGatewayNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginStartPacketCapture = &respr + v.beginStartPacketCapture.add(req, beginStartPacketCapture) + } + + resp, err := server.PollerResponderNext(beginStartPacketCapture, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginStartPacketCapture.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginStartPacketCapture) { + v.beginStartPacketCapture.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkGatewaysServerTransport) dispatchBeginStopPacketCapture(req *http.Request) (*http.Response, error) { + if v.srv.BeginStopPacketCapture == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginStopPacketCapture not implemented")} + } + beginStopPacketCapture := v.beginStopPacketCapture.get(req) + if beginStopPacketCapture == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/stopPacketCapture` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VPNPacketCaptureStopParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginStopPacketCapture(req.Context(), resourceGroupNameParam, virtualNetworkGatewayNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginStopPacketCapture = &respr + v.beginStopPacketCapture.add(req, beginStopPacketCapture) + } + + resp, err := server.PollerResponderNext(beginStopPacketCapture, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginStopPacketCapture.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginStopPacketCapture) { + v.beginStopPacketCapture.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkGatewaysServerTransport) dispatchSupportedVPNDevices(req *http.Request) (*http.Response, error) { + if v.srv.SupportedVPNDevices == nil { + return nil, &nonRetriableError{errors.New("fake for method SupportedVPNDevices not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/supportedvpndevices` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.SupportedVPNDevices(req.Context(), resourceGroupNameParam, virtualNetworkGatewayNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Value, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualNetworkGatewaysServerTransport) dispatchBeginUpdateTags(req *http.Request) (*http.Response, error) { + if v.srv.BeginUpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdateTags not implemented")} + } + beginUpdateTags := v.beginUpdateTags.get(req) + if beginUpdateTags == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginUpdateTags(req.Context(), resourceGroupNameParam, virtualNetworkGatewayNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdateTags = &respr + v.beginUpdateTags.add(req, beginUpdateTags) + } + + resp, err := server.PollerResponderNext(beginUpdateTags, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginUpdateTags.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected 
status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdateTags) { + v.beginUpdateTags.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkGatewaysServerTransport) dispatchVPNDeviceConfigurationScript(req *http.Request) (*http.Response, error) { + if v.srv.VPNDeviceConfigurationScript == nil { + return nil, &nonRetriableError{errors.New("fake for method VPNDeviceConfigurationScript not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/connections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/vpndeviceconfigurationscript` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VPNDeviceScriptParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkGatewayConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkGatewayConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.VPNDeviceConfigurationScript(req.Context(), resourceGroupNameParam, virtualNetworkGatewayConnectionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Value, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualnetworkpeerings_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualnetworkpeerings_server.go new file mode 100644 index 00000000000..1da3ff482f7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualnetworkpeerings_server.go @@ -0,0 +1,284 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// VirtualNetworkPeeringsServer is a fake server for instances of the armnetwork.VirtualNetworkPeeringsClient type. 
+type VirtualNetworkPeeringsServer struct { + // BeginCreateOrUpdate is the fake for method VirtualNetworkPeeringsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string, virtualNetworkPeeringParameters armnetwork.VirtualNetworkPeering, options *armnetwork.VirtualNetworkPeeringsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkPeeringsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VirtualNetworkPeeringsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string, options *armnetwork.VirtualNetworkPeeringsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkPeeringsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VirtualNetworkPeeringsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string, options *armnetwork.VirtualNetworkPeeringsClientGetOptions) (resp azfake.Responder[armnetwork.VirtualNetworkPeeringsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method VirtualNetworkPeeringsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, virtualNetworkName string, options *armnetwork.VirtualNetworkPeeringsClientListOptions) (resp azfake.PagerResponder[armnetwork.VirtualNetworkPeeringsClientListResponse]) +} + +// NewVirtualNetworkPeeringsServerTransport creates a new instance of VirtualNetworkPeeringsServerTransport with the provided implementation. +// The returned VirtualNetworkPeeringsServerTransport instance is connected to an instance of armnetwork.VirtualNetworkPeeringsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVirtualNetworkPeeringsServerTransport(srv *VirtualNetworkPeeringsServer) *VirtualNetworkPeeringsServerTransport { + return &VirtualNetworkPeeringsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkPeeringsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkPeeringsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.VirtualNetworkPeeringsClientListResponse]](), + } +} + +// VirtualNetworkPeeringsServerTransport connects instances of armnetwork.VirtualNetworkPeeringsClient to instances of VirtualNetworkPeeringsServer. +// Don't use this type directly, use NewVirtualNetworkPeeringsServerTransport instead. +type VirtualNetworkPeeringsServerTransport struct { + srv *VirtualNetworkPeeringsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkPeeringsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkPeeringsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.VirtualNetworkPeeringsClientListResponse]] +} + +// Do implements the policy.Transporter interface for VirtualNetworkPeeringsServerTransport. 
+func (v *VirtualNetworkPeeringsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualNetworkPeeringsClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VirtualNetworkPeeringsClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VirtualNetworkPeeringsClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualNetworkPeeringsClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualNetworkPeeringsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualNetworkPeerings/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + body, err := server.UnmarshalRequestAsJSON[armnetwork.VirtualNetworkPeering](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkName")]) + if err != nil { + return nil, err + } + virtualNetworkPeeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkPeeringName")]) + if err != nil { + return nil, err + } + syncRemoteAddressSpaceUnescaped, err := url.QueryUnescape(qp.Get("syncRemoteAddressSpace")) + if err != nil { + return nil, err + } + syncRemoteAddressSpaceParam := getOptional(armnetwork.SyncRemoteAddressSpace(syncRemoteAddressSpaceUnescaped)) + var options *armnetwork.VirtualNetworkPeeringsClientBeginCreateOrUpdateOptions + if syncRemoteAddressSpaceParam != nil { + options = &armnetwork.VirtualNetworkPeeringsClientBeginCreateOrUpdateOptions{ + SyncRemoteAddressSpace: syncRemoteAddressSpaceParam, + } + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, virtualNetworkNameParam, virtualNetworkPeeringNameParam, body, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkPeeringsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualNetworkPeerings/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkName")]) + if err != nil { + return nil, err + } + virtualNetworkPeeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkPeeringName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, virtualNetworkNameParam, virtualNetworkPeeringNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkPeeringsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualNetworkPeerings/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkName")]) + if err != nil { + return nil, err + } + virtualNetworkPeeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkPeeringName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, virtualNetworkNameParam, virtualNetworkPeeringNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualNetworkPeering, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualNetworkPeeringsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := v.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/virtualNetworkPeerings` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListPager(resourceGroupNameParam, virtualNetworkNameParam, nil) + newListPager = &resp + v.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.VirtualNetworkPeeringsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListPager.remove(req) + return nil, 
&nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + v.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualnetworks_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualnetworks_server.go new file mode 100644 index 00000000000..f5e588beacf --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualnetworks_server.go @@ -0,0 +1,525 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" + "strconv" +) + +// VirtualNetworksServer is a fake server for instances of the armnetwork.VirtualNetworksClient type. +type VirtualNetworksServer struct { + // CheckIPAddressAvailability is the fake for method VirtualNetworksClient.CheckIPAddressAvailability + // HTTP status codes to indicate success: http.StatusOK + CheckIPAddressAvailability func(ctx context.Context, resourceGroupName string, virtualNetworkName string, ipAddress string, options *armnetwork.VirtualNetworksClientCheckIPAddressAvailabilityOptions) (resp azfake.Responder[armnetwork.VirtualNetworksClientCheckIPAddressAvailabilityResponse], errResp azfake.ErrorResponder) + + // BeginCreateOrUpdate is the fake for method VirtualNetworksClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, virtualNetworkName string, parameters armnetwork.VirtualNetwork, options *armnetwork.VirtualNetworksClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworksClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VirtualNetworksClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, virtualNetworkName string, options *armnetwork.VirtualNetworksClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworksClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VirtualNetworksClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, virtualNetworkName string, options *armnetwork.VirtualNetworksClientGetOptions) (resp azfake.Responder[armnetwork.VirtualNetworksClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method VirtualNetworksClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, 
options *armnetwork.VirtualNetworksClientListOptions) (resp azfake.PagerResponder[armnetwork.VirtualNetworksClientListResponse]) + + // NewListAllPager is the fake for method VirtualNetworksClient.NewListAllPager + // HTTP status codes to indicate success: http.StatusOK + NewListAllPager func(options *armnetwork.VirtualNetworksClientListAllOptions) (resp azfake.PagerResponder[armnetwork.VirtualNetworksClientListAllResponse]) + + // BeginListDdosProtectionStatus is the fake for method VirtualNetworksClient.BeginListDdosProtectionStatus + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginListDdosProtectionStatus func(ctx context.Context, resourceGroupName string, virtualNetworkName string, options *armnetwork.VirtualNetworksClientBeginListDdosProtectionStatusOptions) (resp azfake.PollerResponder[azfake.PagerResponder[armnetwork.VirtualNetworksClientListDdosProtectionStatusResponse]], errResp azfake.ErrorResponder) + + // NewListUsagePager is the fake for method VirtualNetworksClient.NewListUsagePager + // HTTP status codes to indicate success: http.StatusOK + NewListUsagePager func(resourceGroupName string, virtualNetworkName string, options *armnetwork.VirtualNetworksClientListUsageOptions) (resp azfake.PagerResponder[armnetwork.VirtualNetworksClientListUsageResponse]) + + // UpdateTags is the fake for method VirtualNetworksClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, virtualNetworkName string, parameters armnetwork.TagsObject, options *armnetwork.VirtualNetworksClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.VirtualNetworksClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewVirtualNetworksServerTransport creates a new instance of VirtualNetworksServerTransport with the provided implementation. +// The returned VirtualNetworksServerTransport instance is connected to an instance of armnetwork.VirtualNetworksClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVirtualNetworksServerTransport(srv *VirtualNetworksServer) *VirtualNetworksServerTransport { + return &VirtualNetworksServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworksClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworksClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.VirtualNetworksClientListResponse]](), + newListAllPager: newTracker[azfake.PagerResponder[armnetwork.VirtualNetworksClientListAllResponse]](), + beginListDdosProtectionStatus: newTracker[azfake.PollerResponder[azfake.PagerResponder[armnetwork.VirtualNetworksClientListDdosProtectionStatusResponse]]](), + newListUsagePager: newTracker[azfake.PagerResponder[armnetwork.VirtualNetworksClientListUsageResponse]](), + } +} + +// VirtualNetworksServerTransport connects instances of armnetwork.VirtualNetworksClient to instances of VirtualNetworksServer. +// Don't use this type directly, use NewVirtualNetworksServerTransport instead. 
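As an illustration of the wiring the generated doc comment above describes (fake server connected to the real client through the azcore.ClientOptions.Transporter field), here is a minimal sketch of a test-side setup. It assumes the usual azfake helper shapes (Responder.SetResponse, azfake.TokenCredential); the subscription, resource group, and vnet names are placeholders.

package faketestexample

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func exampleGetViaFake(ctx context.Context) (*armnetwork.VirtualNetwork, error) {
	// Implement only the operation the test needs; anything left nil makes the
	// transport return its "fake for method X not implemented" error.
	srv := fake.VirtualNetworksServer{
		Get: func(ctx context.Context, resourceGroupName, virtualNetworkName string,
			options *armnetwork.VirtualNetworksClientGetOptions) (
			resp azfake.Responder[armnetwork.VirtualNetworksClientGetResponse], errResp azfake.ErrorResponder) {
			// Queue the single 200 response the dispatcher expects for Get.
			resp.SetResponse(http.StatusOK, armnetwork.VirtualNetworksClientGetResponse{
				VirtualNetwork: armnetwork.VirtualNetwork{Name: to.Ptr(virtualNetworkName)},
			}, nil)
			return
		},
	}

	// Route the real client through the fake via the Transporter option.
	client, err := armnetwork.NewVirtualNetworksClient("subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transporter: fake.NewVirtualNetworksServerTransport(&srv)},
	})
	if err != nil {
		return nil, err
	}
	got, err := client.Get(ctx, "rg", "vnet-a", nil)
	if err != nil {
		return nil, err
	}
	return &got.VirtualNetwork, nil
}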
+type VirtualNetworksServerTransport struct { + srv *VirtualNetworksServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.VirtualNetworksClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.VirtualNetworksClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.VirtualNetworksClientListResponse]] + newListAllPager *tracker[azfake.PagerResponder[armnetwork.VirtualNetworksClientListAllResponse]] + beginListDdosProtectionStatus *tracker[azfake.PollerResponder[azfake.PagerResponder[armnetwork.VirtualNetworksClientListDdosProtectionStatusResponse]]] + newListUsagePager *tracker[azfake.PagerResponder[armnetwork.VirtualNetworksClientListUsageResponse]] +} + +// Do implements the policy.Transporter interface for VirtualNetworksServerTransport. +func (v *VirtualNetworksServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualNetworksClient.CheckIPAddressAvailability": + resp, err = v.dispatchCheckIPAddressAvailability(req) + case "VirtualNetworksClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VirtualNetworksClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VirtualNetworksClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualNetworksClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + case "VirtualNetworksClient.NewListAllPager": + resp, err = v.dispatchNewListAllPager(req) + case "VirtualNetworksClient.BeginListDdosProtectionStatus": + resp, err = v.dispatchBeginListDdosProtectionStatus(req) + case "VirtualNetworksClient.NewListUsagePager": + resp, err = v.dispatchNewListUsagePager(req) + case "VirtualNetworksClient.UpdateTags": + resp, err = v.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualNetworksServerTransport) dispatchCheckIPAddressAvailability(req *http.Request) (*http.Response, error) { + if v.srv.CheckIPAddressAvailability == nil { + return nil, &nonRetriableError{errors.New("fake for method CheckIPAddressAvailability not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/CheckIPAddressAvailability` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkName")]) + if err != nil { + return nil, err + } + ipAddressParam, err := url.QueryUnescape(qp.Get("ipAddress")) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.CheckIPAddressAvailability(req.Context(), resourceGroupNameParam, virtualNetworkNameParam, ipAddressParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := 
server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).IPAddressAvailabilityResult, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualNetworksServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VirtualNetwork](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, virtualNetworkNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworksServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, virtualNetworkNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworksServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.VirtualNetworksClientGetOptions + if expandParam != nil { + options = &armnetwork.VirtualNetworksClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, virtualNetworkNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, 
&nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualNetwork, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualNetworksServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := v.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworks` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + v.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.VirtualNetworksClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + v.newListPager.remove(req) + } + return resp, nil +} + +func (v *VirtualNetworksServerTransport) dispatchNewListAllPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListAllPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAllPager not implemented")} + } + newListAllPager := v.newListAllPager.get(req) + if newListAllPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworks` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := v.srv.NewListAllPager(nil) + newListAllPager = &resp + v.newListAllPager.add(req, newListAllPager) + server.PagerResponderInjectNextLinks(newListAllPager, req, func(page *armnetwork.VirtualNetworksClientListAllResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAllPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListAllPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAllPager) { + v.newListAllPager.remove(req) + } + return resp, nil +} + +func (v *VirtualNetworksServerTransport) dispatchBeginListDdosProtectionStatus(req *http.Request) (*http.Response, error) { + if v.srv.BeginListDdosProtectionStatus == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginListDdosProtectionStatus not implemented")} + } + beginListDdosProtectionStatus := v.beginListDdosProtectionStatus.get(req) + if beginListDdosProtectionStatus == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ddosProtectionStatus` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkName")]) + if err != nil { + return nil, err + } + topUnescaped, err := url.QueryUnescape(qp.Get("top")) + if err != nil { + return nil, err + } + topParam, err := parseOptional(topUnescaped, func(v string) (int32, error) { + p, parseErr := strconv.ParseInt(v, 10, 32) + if parseErr != nil { + return 0, parseErr + } + return int32(p), nil + }) + if err != nil { + return nil, err + } + skipTokenUnescaped, err := url.QueryUnescape(qp.Get("skipToken")) + if err != nil { + return nil, err + } + skipTokenParam := getOptional(skipTokenUnescaped) + var options *armnetwork.VirtualNetworksClientBeginListDdosProtectionStatusOptions + if topParam != nil || skipTokenParam != nil { + options = &armnetwork.VirtualNetworksClientBeginListDdosProtectionStatusOptions{ + Top: topParam, + SkipToken: skipTokenParam, + } + } + respr, errRespr := v.srv.BeginListDdosProtectionStatus(req.Context(), resourceGroupNameParam, virtualNetworkNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginListDdosProtectionStatus = &respr + v.beginListDdosProtectionStatus.add(req, beginListDdosProtectionStatus) + } + + resp, err := server.PollerResponderNext(beginListDdosProtectionStatus, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginListDdosProtectionStatus.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginListDdosProtectionStatus) { + v.beginListDdosProtectionStatus.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworksServerTransport) dispatchNewListUsagePager(req *http.Request) (*http.Response, error) { + if v.srv.NewListUsagePager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListUsagePager not implemented")} + } + newListUsagePager := v.newListUsagePager.get(req) + if newListUsagePager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/usages` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListUsagePager(resourceGroupNameParam, virtualNetworkNameParam, nil) + newListUsagePager = &resp + v.newListUsagePager.add(req, newListUsagePager) + server.PagerResponderInjectNextLinks(newListUsagePager, req, func(page *armnetwork.VirtualNetworksClientListUsageResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListUsagePager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListUsagePager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListUsagePager) { + v.newListUsagePager.remove(req) + } + return resp, nil +} + +func (v *VirtualNetworksServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if v.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualNetworkNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualNetworkName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.UpdateTags(req.Context(), resourceGroupNameParam, virtualNetworkNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualNetwork, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualnetworktaps_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualnetworktaps_server.go new file mode 100644 index 00000000000..cb1cabc0836 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualnetworktaps_server.go @@ -0,0 +1,340 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// VirtualNetworkTapsServer is a fake server for instances of the armnetwork.VirtualNetworkTapsClient type. +type VirtualNetworkTapsServer struct { + // BeginCreateOrUpdate is the fake for method VirtualNetworkTapsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, tapName string, parameters armnetwork.VirtualNetworkTap, options *armnetwork.VirtualNetworkTapsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkTapsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VirtualNetworkTapsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, tapName string, options *armnetwork.VirtualNetworkTapsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.VirtualNetworkTapsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VirtualNetworkTapsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, tapName string, options *armnetwork.VirtualNetworkTapsClientGetOptions) (resp azfake.Responder[armnetwork.VirtualNetworkTapsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListAllPager is the fake for method VirtualNetworkTapsClient.NewListAllPager + // HTTP status codes to indicate success: http.StatusOK + NewListAllPager func(options *armnetwork.VirtualNetworkTapsClientListAllOptions) (resp azfake.PagerResponder[armnetwork.VirtualNetworkTapsClientListAllResponse]) + + // NewListByResourceGroupPager is the fake for method VirtualNetworkTapsClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armnetwork.VirtualNetworkTapsClientListByResourceGroupOptions) (resp 
azfake.PagerResponder[armnetwork.VirtualNetworkTapsClientListByResourceGroupResponse]) + + // UpdateTags is the fake for method VirtualNetworkTapsClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, tapName string, tapParameters armnetwork.TagsObject, options *armnetwork.VirtualNetworkTapsClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.VirtualNetworkTapsClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewVirtualNetworkTapsServerTransport creates a new instance of VirtualNetworkTapsServerTransport with the provided implementation. +// The returned VirtualNetworkTapsServerTransport instance is connected to an instance of armnetwork.VirtualNetworkTapsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVirtualNetworkTapsServerTransport(srv *VirtualNetworkTapsServer) *VirtualNetworkTapsServerTransport { + return &VirtualNetworkTapsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkTapsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.VirtualNetworkTapsClientDeleteResponse]](), + newListAllPager: newTracker[azfake.PagerResponder[armnetwork.VirtualNetworkTapsClientListAllResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armnetwork.VirtualNetworkTapsClientListByResourceGroupResponse]](), + } +} + +// VirtualNetworkTapsServerTransport connects instances of armnetwork.VirtualNetworkTapsClient to instances of VirtualNetworkTapsServer. +// Don't use this type directly, use NewVirtualNetworkTapsServerTransport instead. +type VirtualNetworkTapsServerTransport struct { + srv *VirtualNetworkTapsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkTapsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.VirtualNetworkTapsClientDeleteResponse]] + newListAllPager *tracker[azfake.PagerResponder[armnetwork.VirtualNetworkTapsClientListAllResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armnetwork.VirtualNetworkTapsClientListByResourceGroupResponse]] +} + +// Do implements the policy.Transporter interface for VirtualNetworkTapsServerTransport. 
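The Begin* fakes return an azfake.PollerResponder; the dispatch methods below replay its queued responses one per poll through server.PollerResponderNext until server.PollerResponderMore reports the poller is drained. A hedged sketch of a BeginDelete fake that answers one in-progress poll and then completes, assuming the usual PollerResponder helpers (AddNonTerminalResponse, SetTerminalResponse):

package faketestexample

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func exampleBeginDeleteViaFake(ctx context.Context) error {
	srv := fake.VirtualNetworkTapsServer{
		BeginDelete: func(ctx context.Context, resourceGroupName string, tapName string,
			options *armnetwork.VirtualNetworkTapsClientBeginDeleteOptions) (
			resp azfake.PollerResponder[armnetwork.VirtualNetworkTapsClientDeleteResponse], errResp azfake.ErrorResponder) {
			// One 202 "still deleting" poll, then the terminal 200; both codes are
			// in the dispatcher's accepted set (OK, Accepted, NoContent).
			resp.AddNonTerminalResponse(http.StatusAccepted, nil)
			resp.SetTerminalResponse(http.StatusOK, armnetwork.VirtualNetworkTapsClientDeleteResponse{}, nil)
			return
		},
	}
	client, err := armnetwork.NewVirtualNetworkTapsClient("subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transporter: fake.NewVirtualNetworkTapsServerTransport(&srv)},
	})
	if err != nil {
		return err
	}
	poller, err := client.BeginDelete(ctx, "rg", "tap-a", nil)
	if err != nil {
		return err
	}
	// Tests may want to lower the polling frequency via runtime.PollUntilDoneOptions.
	_, err = poller.PollUntilDone(ctx, nil)
	return err
}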
+func (v *VirtualNetworkTapsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualNetworkTapsClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VirtualNetworkTapsClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VirtualNetworkTapsClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualNetworkTapsClient.NewListAllPager": + resp, err = v.dispatchNewListAllPager(req) + case "VirtualNetworkTapsClient.NewListByResourceGroupPager": + resp, err = v.dispatchNewListByResourceGroupPager(req) + case "VirtualNetworkTapsClient.UpdateTags": + resp, err = v.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualNetworkTapsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkTaps/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VirtualNetworkTap](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + tapNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("tapName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, tapNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkTapsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkTaps/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + tapNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("tapName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, tapNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VirtualNetworkTapsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkTaps/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + tapNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("tapName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, tapNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualNetworkTap, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualNetworkTapsServerTransport) dispatchNewListAllPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListAllPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAllPager not implemented")} + } + newListAllPager := v.newListAllPager.get(req) + if newListAllPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkTaps` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := v.srv.NewListAllPager(nil) + newListAllPager = &resp + v.newListAllPager.add(req, newListAllPager) + server.PagerResponderInjectNextLinks(newListAllPager, req, func(page *armnetwork.VirtualNetworkTapsClientListAllResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAllPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListAllPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAllPager) { + v.newListAllPager.remove(req) + } + return resp, nil +} + +func (v *VirtualNetworkTapsServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := v.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkTaps` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + v.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armnetwork.VirtualNetworkTapsClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + v.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (v *VirtualNetworkTapsServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if v.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualNetworkTaps/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + tapNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("tapName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.UpdateTags(req.Context(), resourceGroupNameParam, tapNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualNetworkTap, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualrouterpeerings_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualrouterpeerings_server.go new file mode 100644 index 00000000000..f61c37a4591 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualrouterpeerings_server.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// VirtualRouterPeeringsServer is a fake server for instances of the armnetwork.VirtualRouterPeeringsClient type. 
+type VirtualRouterPeeringsServer struct {
+	// BeginCreateOrUpdate is the fake for method VirtualRouterPeeringsClient.BeginCreateOrUpdate
+	// HTTP status codes to indicate success: http.StatusOK, http.StatusCreated
+	BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, virtualRouterName string, peeringName string, parameters armnetwork.VirtualRouterPeering, options *armnetwork.VirtualRouterPeeringsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.VirtualRouterPeeringsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder)
+
+	// BeginDelete is the fake for method VirtualRouterPeeringsClient.BeginDelete
+	// HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent
+	BeginDelete func(ctx context.Context, resourceGroupName string, virtualRouterName string, peeringName string, options *armnetwork.VirtualRouterPeeringsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.VirtualRouterPeeringsClientDeleteResponse], errResp azfake.ErrorResponder)
+
+	// Get is the fake for method VirtualRouterPeeringsClient.Get
+	// HTTP status codes to indicate success: http.StatusOK
+	Get func(ctx context.Context, resourceGroupName string, virtualRouterName string, peeringName string, options *armnetwork.VirtualRouterPeeringsClientGetOptions) (resp azfake.Responder[armnetwork.VirtualRouterPeeringsClientGetResponse], errResp azfake.ErrorResponder)
+
+	// NewListPager is the fake for method VirtualRouterPeeringsClient.NewListPager
+	// HTTP status codes to indicate success: http.StatusOK
+	NewListPager func(resourceGroupName string, virtualRouterName string, options *armnetwork.VirtualRouterPeeringsClientListOptions) (resp azfake.PagerResponder[armnetwork.VirtualRouterPeeringsClientListResponse])
+}
+
+// NewVirtualRouterPeeringsServerTransport creates a new instance of VirtualRouterPeeringsServerTransport with the provided implementation.
+// The returned VirtualRouterPeeringsServerTransport instance is connected to an instance of armnetwork.VirtualRouterPeeringsClient via the
+// azcore.ClientOptions.Transporter field in the client's constructor parameters.
+func NewVirtualRouterPeeringsServerTransport(srv *VirtualRouterPeeringsServer) *VirtualRouterPeeringsServerTransport {
+	return &VirtualRouterPeeringsServerTransport{
+		srv:                 srv,
+		beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.VirtualRouterPeeringsClientCreateOrUpdateResponse]](),
+		beginDelete:         newTracker[azfake.PollerResponder[armnetwork.VirtualRouterPeeringsClientDeleteResponse]](),
+		newListPager:        newTracker[azfake.PagerResponder[armnetwork.VirtualRouterPeeringsClientListResponse]](),
+	}
+}
+
+// VirtualRouterPeeringsServerTransport connects instances of armnetwork.VirtualRouterPeeringsClient to instances of VirtualRouterPeeringsServer.
+// Don't use this type directly, use NewVirtualRouterPeeringsServerTransport instead.
+type VirtualRouterPeeringsServerTransport struct {
+	srv                 *VirtualRouterPeeringsServer
+	beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.VirtualRouterPeeringsClientCreateOrUpdateResponse]]
+	beginDelete         *tracker[azfake.PollerResponder[armnetwork.VirtualRouterPeeringsClientDeleteResponse]]
+	newListPager        *tracker[azfake.PagerResponder[armnetwork.VirtualRouterPeeringsClientListResponse]]
+}
+
+// Do implements the policy.Transporter interface for VirtualRouterPeeringsServerTransport.
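The pager fakes follow the same pattern with azfake.PagerResponder: each queued page becomes one list response, and dispatchNewListPager below stitches the pages together through PagerResponderInjectNextLinks. A sketch under the assumption that AddPage and the generated VirtualRouterPeeringListResult embedding keep their usual shapes; names are placeholders.

package faketestexample

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func exampleListPeeringsViaFake(ctx context.Context) ([]*armnetwork.VirtualRouterPeering, error) {
	srv := fake.VirtualRouterPeeringsServer{
		NewListPager: func(resourceGroupName string, virtualRouterName string,
			options *armnetwork.VirtualRouterPeeringsClientListOptions) (
			resp azfake.PagerResponder[armnetwork.VirtualRouterPeeringsClientListResponse]) {
			// Two pages; the transport serves one per request and injects NextLink.
			resp.AddPage(http.StatusOK, armnetwork.VirtualRouterPeeringsClientListResponse{
				VirtualRouterPeeringListResult: armnetwork.VirtualRouterPeeringListResult{
					Value: []*armnetwork.VirtualRouterPeering{{Name: to.Ptr("peering-1")}},
				},
			}, nil)
			resp.AddPage(http.StatusOK, armnetwork.VirtualRouterPeeringsClientListResponse{
				VirtualRouterPeeringListResult: armnetwork.VirtualRouterPeeringListResult{
					Value: []*armnetwork.VirtualRouterPeering{{Name: to.Ptr("peering-2")}},
				},
			}, nil)
			return
		},
	}
	client, err := armnetwork.NewVirtualRouterPeeringsClient("subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Transporter: fake.NewVirtualRouterPeeringsServerTransport(&srv)},
	})
	if err != nil {
		return nil, err
	}
	var all []*armnetwork.VirtualRouterPeering
	pager := client.NewListPager("rg", "router-a", nil)
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return nil, err
		}
		all = append(all, page.Value...)
	}
	return all, nil
}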
+func (v *VirtualRouterPeeringsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualRouterPeeringsClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VirtualRouterPeeringsClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VirtualRouterPeeringsClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualRouterPeeringsClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualRouterPeeringsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualRouters/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VirtualRouterPeering](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualRouterNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualRouterName")]) + if err != nil { + return nil, err + } + peeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peeringName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, virtualRouterNameParam, peeringNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VirtualRouterPeeringsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualRouters/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualRouterNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualRouterName")]) + if err != nil { + return nil, err + } + peeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peeringName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, virtualRouterNameParam, peeringNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VirtualRouterPeeringsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualRouters/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualRouterNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualRouterName")]) + if err != nil { + return nil, err + } + peeringNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("peeringName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, virtualRouterNameParam, peeringNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualRouterPeering, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualRouterPeeringsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := v.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualRouters/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/peerings` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualRouterNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualRouterName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListPager(resourceGroupNameParam, virtualRouterNameParam, nil) + newListPager = &resp + v.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.VirtualRouterPeeringsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + v.newListPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualrouters_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualrouters_server.go new file mode 100644 index 00000000000..7d21827d77b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualrouters_server.go @@ -0,0 +1,309 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// VirtualRoutersServer is a fake server for instances of the armnetwork.VirtualRoutersClient type. +type VirtualRoutersServer struct { + // BeginCreateOrUpdate is the fake for method VirtualRoutersClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, virtualRouterName string, parameters armnetwork.VirtualRouter, options *armnetwork.VirtualRoutersClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.VirtualRoutersClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VirtualRoutersClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, virtualRouterName string, options *armnetwork.VirtualRoutersClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.VirtualRoutersClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VirtualRoutersClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, virtualRouterName string, options *armnetwork.VirtualRoutersClientGetOptions) (resp azfake.Responder[armnetwork.VirtualRoutersClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method VirtualRoutersClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armnetwork.VirtualRoutersClientListOptions) (resp azfake.PagerResponder[armnetwork.VirtualRoutersClientListResponse]) + + // NewListByResourceGroupPager is the fake for method VirtualRoutersClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armnetwork.VirtualRoutersClientListByResourceGroupOptions) (resp azfake.PagerResponder[armnetwork.VirtualRoutersClientListByResourceGroupResponse]) +} + +// NewVirtualRoutersServerTransport creates a new instance of VirtualRoutersServerTransport 
with the provided implementation. +// The returned VirtualRoutersServerTransport instance is connected to an instance of armnetwork.VirtualRoutersClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVirtualRoutersServerTransport(srv *VirtualRoutersServer) *VirtualRoutersServerTransport { + return &VirtualRoutersServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.VirtualRoutersClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.VirtualRoutersClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.VirtualRoutersClientListResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armnetwork.VirtualRoutersClientListByResourceGroupResponse]](), + } +} + +// VirtualRoutersServerTransport connects instances of armnetwork.VirtualRoutersClient to instances of VirtualRoutersServer. +// Don't use this type directly, use NewVirtualRoutersServerTransport instead. +type VirtualRoutersServerTransport struct { + srv *VirtualRoutersServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.VirtualRoutersClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.VirtualRoutersClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.VirtualRoutersClientListResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armnetwork.VirtualRoutersClientListByResourceGroupResponse]] +} + +// Do implements the policy.Transporter interface for VirtualRoutersServerTransport. +func (v *VirtualRoutersServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualRoutersClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VirtualRoutersClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VirtualRoutersClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualRoutersClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + case "VirtualRoutersClient.NewListByResourceGroupPager": + resp, err = v.dispatchNewListByResourceGroupPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualRoutersServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualRouters/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VirtualRouter](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := 
url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualRouterNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualRouterName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, virtualRouterNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VirtualRoutersServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualRouters/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualRouterNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualRouterName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, virtualRouterNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VirtualRoutersServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualRouters/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualRouterNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualRouterName")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.VirtualRoutersClientGetOptions + if expandParam != nil { + options = &armnetwork.VirtualRoutersClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, virtualRouterNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualRouter, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualRoutersServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := v.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualRouters` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := v.srv.NewListPager(nil) + newListPager = &resp + v.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.VirtualRoutersClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + v.newListPager.remove(req) + } + return resp, nil +} + +func (v *VirtualRoutersServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := v.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualRouters` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + v.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armnetwork.VirtualRoutersClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + v.newListByResourceGroupPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualwans_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualwans_server.go new file mode 100644 index 00000000000..bd3b6cc66fb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/virtualwans_server.go @@ -0,0 +1,340 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// VirtualWansServer is a fake server for instances of the armnetwork.VirtualWansClient type. 
+type VirtualWansServer struct { + // BeginCreateOrUpdate is the fake for method VirtualWansClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, virtualWANName string, wanParameters armnetwork.VirtualWAN, options *armnetwork.VirtualWansClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.VirtualWansClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VirtualWansClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, virtualWANName string, options *armnetwork.VirtualWansClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.VirtualWansClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VirtualWansClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, virtualWANName string, options *armnetwork.VirtualWansClientGetOptions) (resp azfake.Responder[armnetwork.VirtualWansClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method VirtualWansClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armnetwork.VirtualWansClientListOptions) (resp azfake.PagerResponder[armnetwork.VirtualWansClientListResponse]) + + // NewListByResourceGroupPager is the fake for method VirtualWansClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armnetwork.VirtualWansClientListByResourceGroupOptions) (resp azfake.PagerResponder[armnetwork.VirtualWansClientListByResourceGroupResponse]) + + // UpdateTags is the fake for method VirtualWansClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, virtualWANName string, wanParameters armnetwork.TagsObject, options *armnetwork.VirtualWansClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.VirtualWansClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewVirtualWansServerTransport creates a new instance of VirtualWansServerTransport with the provided implementation. +// The returned VirtualWansServerTransport instance is connected to an instance of armnetwork.VirtualWansClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVirtualWansServerTransport(srv *VirtualWansServer) *VirtualWansServerTransport { + return &VirtualWansServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.VirtualWansClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.VirtualWansClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.VirtualWansClientListResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armnetwork.VirtualWansClientListByResourceGroupResponse]](), + } +} + +// VirtualWansServerTransport connects instances of armnetwork.VirtualWansClient to instances of VirtualWansServer. +// Don't use this type directly, use NewVirtualWansServerTransport instead. 
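The azfake.Responder and azfake.ErrorResponder fields declared above are populated by test code, not by this package. What follows is a minimal, hypothetical sketch (not part of the generated diff) of filling in VirtualWansServer.Get and routing a real client through the fake transport, assuming the standard azcore fakes helpers (Responder.SetResponse, azfake.TokenCredential) and made-up resource names; in practice this would live in a test rather than a main function.

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func main() {
	// Implement only the methods the test exercises; the transport returns a
	// non-retriable "not implemented" error for any other method the client calls.
	srv := fake.VirtualWansServer{
		Get: func(ctx context.Context, resourceGroupName, virtualWANName string, options *armnetwork.VirtualWansClientGetOptions) (azfake.Responder[armnetwork.VirtualWansClientGetResponse], azfake.ErrorResponder) {
			var resp azfake.Responder[armnetwork.VirtualWansClientGetResponse]
			resp.SetResponse(http.StatusOK, armnetwork.VirtualWansClientGetResponse{
				VirtualWAN: armnetwork.VirtualWAN{Name: to.Ptr(virtualWANName)},
			}, nil)
			return resp, azfake.ErrorResponder{} // zero ErrorResponder means success
		},
	}

	// Route a real client through the fake via the Transporter client option.
	client, err := armnetwork.NewVirtualWansClient("subscription-id", &azfake.TokenCredential{},
		&arm.ClientOptions{ClientOptions: azcore.ClientOptions{Transporter: fake.NewVirtualWansServerTransport(&srv)}})
	if err != nil {
		log.Fatal(err)
	}
	res, err := client.Get(context.Background(), "rg", "wan1", nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("got %s", *res.Name)
}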
+type VirtualWansServerTransport struct { + srv *VirtualWansServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.VirtualWansClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.VirtualWansClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.VirtualWansClientListResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armnetwork.VirtualWansClientListByResourceGroupResponse]] +} + +// Do implements the policy.Transporter interface for VirtualWansServerTransport. +func (v *VirtualWansServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VirtualWansClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VirtualWansClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VirtualWansClient.Get": + resp, err = v.dispatchGet(req) + case "VirtualWansClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + case "VirtualWansClient.NewListByResourceGroupPager": + resp, err = v.dispatchNewListByResourceGroupPager(req) + case "VirtualWansClient.UpdateTags": + resp, err = v.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VirtualWansServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualWans/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VirtualWAN](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualWANNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("VirtualWANName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, virtualWANNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VirtualWansServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualWans/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualWANNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("VirtualWANName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, virtualWANNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VirtualWansServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualWans/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualWANNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("VirtualWANName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, virtualWANNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualWAN, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VirtualWansServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := v.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualWans` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := v.srv.NewListPager(nil) + newListPager = &resp + v.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.VirtualWansClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + v.newListPager.remove(req) + } + return resp, nil +} + +func (v *VirtualWansServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := v.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualWans` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + v.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armnetwork.VirtualWansClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + v.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (v *VirtualWansServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if v.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualWans/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualWANNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("VirtualWANName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.UpdateTags(req.Context(), resourceGroupNameParam, virtualWANNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VirtualWAN, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpnconnections_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpnconnections_server.go new file mode 100644 index 00000000000..ca4d6af2953 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpnconnections_server.go @@ -0,0 +1,405 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "reflect" + "regexp" +) + +// VPNConnectionsServer is a fake server for instances of the armnetwork.VPNConnectionsClient type. 
+type VPNConnectionsServer struct { + // BeginCreateOrUpdate is the fake for method VPNConnectionsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, gatewayName string, connectionName string, vpnConnectionParameters armnetwork.VPNConnection, options *armnetwork.VPNConnectionsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.VPNConnectionsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VPNConnectionsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, gatewayName string, connectionName string, options *armnetwork.VPNConnectionsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.VPNConnectionsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VPNConnectionsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, gatewayName string, connectionName string, options *armnetwork.VPNConnectionsClientGetOptions) (resp azfake.Responder[armnetwork.VPNConnectionsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListByVPNGatewayPager is the fake for method VPNConnectionsClient.NewListByVPNGatewayPager + // HTTP status codes to indicate success: http.StatusOK + NewListByVPNGatewayPager func(resourceGroupName string, gatewayName string, options *armnetwork.VPNConnectionsClientListByVPNGatewayOptions) (resp azfake.PagerResponder[armnetwork.VPNConnectionsClientListByVPNGatewayResponse]) + + // BeginStartPacketCapture is the fake for method VPNConnectionsClient.BeginStartPacketCapture + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginStartPacketCapture func(ctx context.Context, resourceGroupName string, gatewayName string, vpnConnectionName string, options *armnetwork.VPNConnectionsClientBeginStartPacketCaptureOptions) (resp azfake.PollerResponder[armnetwork.VPNConnectionsClientStartPacketCaptureResponse], errResp azfake.ErrorResponder) + + // BeginStopPacketCapture is the fake for method VPNConnectionsClient.BeginStopPacketCapture + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginStopPacketCapture func(ctx context.Context, resourceGroupName string, gatewayName string, vpnConnectionName string, options *armnetwork.VPNConnectionsClientBeginStopPacketCaptureOptions) (resp azfake.PollerResponder[armnetwork.VPNConnectionsClientStopPacketCaptureResponse], errResp azfake.ErrorResponder) +} + +// NewVPNConnectionsServerTransport creates a new instance of VPNConnectionsServerTransport with the provided implementation. +// The returned VPNConnectionsServerTransport instance is connected to an instance of armnetwork.VPNConnectionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. 
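To make the wiring described above concrete, here is a brief illustrative sketch (an assumption-laden example, not generated code) of driving the BeginCreateOrUpdate long-running operation through this transport: the fake returns an azfake.PollerResponder whose terminal response the client's poller surfaces via PollUntilDone. The subscription, resource names, and request body are hypothetical; AddNonTerminalResponse could be added before the terminal response to simulate intermediate polling states.

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func main() {
	srv := fake.VPNConnectionsServer{
		BeginCreateOrUpdate: func(ctx context.Context, resourceGroupName, gatewayName, connectionName string, params armnetwork.VPNConnection, options *armnetwork.VPNConnectionsClientBeginCreateOrUpdateOptions) (azfake.PollerResponder[armnetwork.VPNConnectionsClientCreateOrUpdateResponse], azfake.ErrorResponder) {
			var lro azfake.PollerResponder[armnetwork.VPNConnectionsClientCreateOrUpdateResponse]
			// Terminal 200 echoing the request body back to the caller.
			lro.SetTerminalResponse(http.StatusOK, armnetwork.VPNConnectionsClientCreateOrUpdateResponse{
				VPNConnection: params,
			}, nil)
			return lro, azfake.ErrorResponder{}
		},
	}

	client, err := armnetwork.NewVPNConnectionsClient("subscription-id", &azfake.TokenCredential{},
		&arm.ClientOptions{ClientOptions: azcore.ClientOptions{Transporter: fake.NewVPNConnectionsServerTransport(&srv)}})
	if err != nil {
		log.Fatal(err)
	}
	poller, err := client.BeginCreateOrUpdate(context.Background(), "rg", "gw", "conn", armnetwork.VPNConnection{}, nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := poller.PollUntilDone(context.Background(), nil); err != nil {
		log.Fatal(err)
	}
}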
+func NewVPNConnectionsServerTransport(srv *VPNConnectionsServer) *VPNConnectionsServerTransport { + return &VPNConnectionsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.VPNConnectionsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.VPNConnectionsClientDeleteResponse]](), + newListByVPNGatewayPager: newTracker[azfake.PagerResponder[armnetwork.VPNConnectionsClientListByVPNGatewayResponse]](), + beginStartPacketCapture: newTracker[azfake.PollerResponder[armnetwork.VPNConnectionsClientStartPacketCaptureResponse]](), + beginStopPacketCapture: newTracker[azfake.PollerResponder[armnetwork.VPNConnectionsClientStopPacketCaptureResponse]](), + } +} + +// VPNConnectionsServerTransport connects instances of armnetwork.VPNConnectionsClient to instances of VPNConnectionsServer. +// Don't use this type directly, use NewVPNConnectionsServerTransport instead. +type VPNConnectionsServerTransport struct { + srv *VPNConnectionsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.VPNConnectionsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.VPNConnectionsClientDeleteResponse]] + newListByVPNGatewayPager *tracker[azfake.PagerResponder[armnetwork.VPNConnectionsClientListByVPNGatewayResponse]] + beginStartPacketCapture *tracker[azfake.PollerResponder[armnetwork.VPNConnectionsClientStartPacketCaptureResponse]] + beginStopPacketCapture *tracker[azfake.PollerResponder[armnetwork.VPNConnectionsClientStopPacketCaptureResponse]] +} + +// Do implements the policy.Transporter interface for VPNConnectionsServerTransport. +func (v *VPNConnectionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VPNConnectionsClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VPNConnectionsClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VPNConnectionsClient.Get": + resp, err = v.dispatchGet(req) + case "VPNConnectionsClient.NewListByVPNGatewayPager": + resp, err = v.dispatchNewListByVPNGatewayPager(req) + case "VPNConnectionsClient.BeginStartPacketCapture": + resp, err = v.dispatchBeginStartPacketCapture(req) + case "VPNConnectionsClient.BeginStopPacketCapture": + resp, err = v.dispatchBeginStopPacketCapture(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VPNConnectionsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/vpnConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + 
body, err := server.UnmarshalRequestAsJSON[armnetwork.VPNConnection](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, gatewayNameParam, connectionNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VPNConnectionsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/vpnConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, gatewayNameParam, connectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VPNConnectionsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/vpnConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, gatewayNameParam, connectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VPNConnection, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VPNConnectionsServerTransport) dispatchNewListByVPNGatewayPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListByVPNGatewayPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByVPNGatewayPager not implemented")} + } + newListByVPNGatewayPager := v.newListByVPNGatewayPager.get(req) + if newListByVPNGatewayPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/vpnConnections` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListByVPNGatewayPager(resourceGroupNameParam, gatewayNameParam, nil) + newListByVPNGatewayPager = &resp + v.newListByVPNGatewayPager.add(req, newListByVPNGatewayPager) + server.PagerResponderInjectNextLinks(newListByVPNGatewayPager, req, func(page *armnetwork.VPNConnectionsClientListByVPNGatewayResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByVPNGatewayPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + 
v.newListByVPNGatewayPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByVPNGatewayPager) { + v.newListByVPNGatewayPager.remove(req) + } + return resp, nil +} + +func (v *VPNConnectionsServerTransport) dispatchBeginStartPacketCapture(req *http.Request) (*http.Response, error) { + if v.srv.BeginStartPacketCapture == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginStartPacketCapture not implemented")} + } + beginStartPacketCapture := v.beginStartPacketCapture.get(req) + if beginStartPacketCapture == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/vpnConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/startpacketcapture` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VPNConnectionPacketCaptureStartParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + vpnConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vpnConnectionName")]) + if err != nil { + return nil, err + } + var options *armnetwork.VPNConnectionsClientBeginStartPacketCaptureOptions + if !reflect.ValueOf(body).IsZero() { + options = &armnetwork.VPNConnectionsClientBeginStartPacketCaptureOptions{ + Parameters: &body, + } + } + respr, errRespr := v.srv.BeginStartPacketCapture(req.Context(), resourceGroupNameParam, gatewayNameParam, vpnConnectionNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginStartPacketCapture = &respr + v.beginStartPacketCapture.add(req, beginStartPacketCapture) + } + + resp, err := server.PollerResponderNext(beginStartPacketCapture, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginStartPacketCapture.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginStartPacketCapture) { + v.beginStartPacketCapture.remove(req) + } + + return resp, nil +} + +func (v *VPNConnectionsServerTransport) dispatchBeginStopPacketCapture(req *http.Request) (*http.Response, error) { + if v.srv.BeginStopPacketCapture == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginStopPacketCapture not implemented")} + } + beginStopPacketCapture := v.beginStopPacketCapture.get(req) + if beginStopPacketCapture == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/vpnConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/stoppacketcapture` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VPNConnectionPacketCaptureStopParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + vpnConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vpnConnectionName")]) + if err != nil { + return nil, err + } + var options *armnetwork.VPNConnectionsClientBeginStopPacketCaptureOptions + if !reflect.ValueOf(body).IsZero() { + options = &armnetwork.VPNConnectionsClientBeginStopPacketCaptureOptions{ + Parameters: &body, + } + } + respr, errRespr := v.srv.BeginStopPacketCapture(req.Context(), resourceGroupNameParam, gatewayNameParam, vpnConnectionNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginStopPacketCapture = &respr + v.beginStopPacketCapture.add(req, beginStopPacketCapture) + } + + resp, err := server.PollerResponderNext(beginStopPacketCapture, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginStopPacketCapture.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginStopPacketCapture) { + v.beginStopPacketCapture.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpngateways_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpngateways_server.go new file mode 100644 index 00000000000..dd3f66b9454 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpngateways_server.go @@ -0,0 +1,542 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "reflect" + "regexp" +) + +// VPNGatewaysServer is a fake server for instances of the armnetwork.VPNGatewaysClient type. +type VPNGatewaysServer struct { + // BeginCreateOrUpdate is the fake for method VPNGatewaysClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, gatewayName string, vpnGatewayParameters armnetwork.VPNGateway, options *armnetwork.VPNGatewaysClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.VPNGatewaysClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VPNGatewaysClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, gatewayName string, options *armnetwork.VPNGatewaysClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.VPNGatewaysClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VPNGatewaysClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, gatewayName string, options *armnetwork.VPNGatewaysClientGetOptions) (resp azfake.Responder[armnetwork.VPNGatewaysClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method VPNGatewaysClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armnetwork.VPNGatewaysClientListOptions) (resp azfake.PagerResponder[armnetwork.VPNGatewaysClientListResponse]) + + // NewListByResourceGroupPager is the fake for method VPNGatewaysClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armnetwork.VPNGatewaysClientListByResourceGroupOptions) (resp azfake.PagerResponder[armnetwork.VPNGatewaysClientListByResourceGroupResponse]) + + // BeginReset is the fake for method VPNGatewaysClient.BeginReset + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginReset func(ctx context.Context, resourceGroupName string, gatewayName string, options *armnetwork.VPNGatewaysClientBeginResetOptions) (resp azfake.PollerResponder[armnetwork.VPNGatewaysClientResetResponse], errResp azfake.ErrorResponder) + + // BeginStartPacketCapture is the fake for method VPNGatewaysClient.BeginStartPacketCapture + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginStartPacketCapture func(ctx context.Context, resourceGroupName string, gatewayName string, options *armnetwork.VPNGatewaysClientBeginStartPacketCaptureOptions) (resp azfake.PollerResponder[armnetwork.VPNGatewaysClientStartPacketCaptureResponse], errResp azfake.ErrorResponder) + + // BeginStopPacketCapture is the fake for method VPNGatewaysClient.BeginStopPacketCapture + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginStopPacketCapture func(ctx context.Context, resourceGroupName string, gatewayName 
string, options *armnetwork.VPNGatewaysClientBeginStopPacketCaptureOptions) (resp azfake.PollerResponder[armnetwork.VPNGatewaysClientStopPacketCaptureResponse], errResp azfake.ErrorResponder) + + // BeginUpdateTags is the fake for method VPNGatewaysClient.BeginUpdateTags + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginUpdateTags func(ctx context.Context, resourceGroupName string, gatewayName string, vpnGatewayParameters armnetwork.TagsObject, options *armnetwork.VPNGatewaysClientBeginUpdateTagsOptions) (resp azfake.PollerResponder[armnetwork.VPNGatewaysClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewVPNGatewaysServerTransport creates a new instance of VPNGatewaysServerTransport with the provided implementation. +// The returned VPNGatewaysServerTransport instance is connected to an instance of armnetwork.VPNGatewaysClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVPNGatewaysServerTransport(srv *VPNGatewaysServer) *VPNGatewaysServerTransport { + return &VPNGatewaysServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.VPNGatewaysClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.VPNGatewaysClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.VPNGatewaysClientListResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armnetwork.VPNGatewaysClientListByResourceGroupResponse]](), + beginReset: newTracker[azfake.PollerResponder[armnetwork.VPNGatewaysClientResetResponse]](), + beginStartPacketCapture: newTracker[azfake.PollerResponder[armnetwork.VPNGatewaysClientStartPacketCaptureResponse]](), + beginStopPacketCapture: newTracker[azfake.PollerResponder[armnetwork.VPNGatewaysClientStopPacketCaptureResponse]](), + beginUpdateTags: newTracker[azfake.PollerResponder[armnetwork.VPNGatewaysClientUpdateTagsResponse]](), + } +} + +// VPNGatewaysServerTransport connects instances of armnetwork.VPNGatewaysClient to instances of VPNGatewaysServer. +// Don't use this type directly, use NewVPNGatewaysServerTransport instead. +type VPNGatewaysServerTransport struct { + srv *VPNGatewaysServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.VPNGatewaysClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.VPNGatewaysClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.VPNGatewaysClientListResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armnetwork.VPNGatewaysClientListByResourceGroupResponse]] + beginReset *tracker[azfake.PollerResponder[armnetwork.VPNGatewaysClientResetResponse]] + beginStartPacketCapture *tracker[azfake.PollerResponder[armnetwork.VPNGatewaysClientStartPacketCaptureResponse]] + beginStopPacketCapture *tracker[azfake.PollerResponder[armnetwork.VPNGatewaysClientStopPacketCaptureResponse]] + beginUpdateTags *tracker[azfake.PollerResponder[armnetwork.VPNGatewaysClientUpdateTagsResponse]] +} + +// Do implements the policy.Transporter interface for VPNGatewaysServerTransport. 
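For the list operations, the test fills an azfake.PagerResponder with pages that the transport replays to the client's pager, injecting nextLink values between them (see the PagerResponderInjectNextLinks calls in the dispatchers below). A rough sketch, again with hypothetical names and assuming the azcore fakes helpers (AddPage, azfake.TokenCredential), and again not part of the generated diff:

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

func main() {
	srv := fake.VPNGatewaysServer{
		NewListPager: func(options *armnetwork.VPNGatewaysClientListOptions) azfake.PagerResponder[armnetwork.VPNGatewaysClientListResponse] {
			var pager azfake.PagerResponder[armnetwork.VPNGatewaysClientListResponse]
			// Two pages; the transport chains them with injected nextLinks.
			var page1, page2 armnetwork.VPNGatewaysClientListResponse
			page1.Value = []*armnetwork.VPNGateway{{Name: to.Ptr("gw1")}}
			page2.Value = []*armnetwork.VPNGateway{{Name: to.Ptr("gw2")}}
			pager.AddPage(http.StatusOK, page1, nil)
			pager.AddPage(http.StatusOK, page2, nil)
			return pager
		},
	}

	client, err := armnetwork.NewVPNGatewaysClient("subscription-id", &azfake.TokenCredential{},
		&arm.ClientOptions{ClientOptions: azcore.ClientOptions{Transporter: fake.NewVPNGatewaysServerTransport(&srv)}})
	if err != nil {
		log.Fatal(err)
	}
	pager := client.NewListPager(nil)
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		for _, gw := range page.Value {
			log.Println(*gw.Name)
		}
	}
}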
+func (v *VPNGatewaysServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VPNGatewaysClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VPNGatewaysClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VPNGatewaysClient.Get": + resp, err = v.dispatchGet(req) + case "VPNGatewaysClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + case "VPNGatewaysClient.NewListByResourceGroupPager": + resp, err = v.dispatchNewListByResourceGroupPager(req) + case "VPNGatewaysClient.BeginReset": + resp, err = v.dispatchBeginReset(req) + case "VPNGatewaysClient.BeginStartPacketCapture": + resp, err = v.dispatchBeginStartPacketCapture(req) + case "VPNGatewaysClient.BeginStopPacketCapture": + resp, err = v.dispatchBeginStopPacketCapture(req) + case "VPNGatewaysClient.BeginUpdateTags": + resp, err = v.dispatchBeginUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VPNGatewaysServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VPNGateway](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, gatewayNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VPNGatewaysServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, gatewayNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VPNGatewaysServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, gatewayNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VPNGateway, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VPNGatewaysServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := v.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnGateways` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := v.srv.NewListPager(nil) + newListPager = &resp + v.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.VPNGatewaysClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + v.newListPager.remove(req) + } + return resp, nil +} + +func (v *VPNGatewaysServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := v.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnGateways` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + v.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armnetwork.VPNGatewaysClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + v.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (v *VPNGatewaysServerTransport) dispatchBeginReset(req *http.Request) (*http.Response, error) { + if v.srv.BeginReset == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginReset not implemented")} + } + beginReset := v.beginReset.get(req) + if beginReset == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/reset` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + iPConfigurationIDUnescaped, err := url.QueryUnescape(qp.Get("ipConfigurationId")) + if err != nil { + return nil, err + } + iPConfigurationIDParam := getOptional(iPConfigurationIDUnescaped) + var options *armnetwork.VPNGatewaysClientBeginResetOptions + if iPConfigurationIDParam != nil { + options = &armnetwork.VPNGatewaysClientBeginResetOptions{ + IPConfigurationID: iPConfigurationIDParam, + } + } + respr, errRespr := v.srv.BeginReset(req.Context(), resourceGroupNameParam, gatewayNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginReset = &respr + v.beginReset.add(req, beginReset) + } + + resp, err := server.PollerResponderNext(beginReset, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginReset.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginReset) { + v.beginReset.remove(req) + } + + return resp, nil +} + +func (v *VPNGatewaysServerTransport) dispatchBeginStartPacketCapture(req *http.Request) (*http.Response, error) { + if v.srv.BeginStartPacketCapture == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginStartPacketCapture not implemented")} + } + beginStartPacketCapture := v.beginStartPacketCapture.get(req) + if beginStartPacketCapture == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/startpacketcapture` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VPNGatewayPacketCaptureStartParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + var options *armnetwork.VPNGatewaysClientBeginStartPacketCaptureOptions + if !reflect.ValueOf(body).IsZero() { + options = &armnetwork.VPNGatewaysClientBeginStartPacketCaptureOptions{ + Parameters: &body, + } + } + respr, errRespr := v.srv.BeginStartPacketCapture(req.Context(), resourceGroupNameParam, gatewayNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginStartPacketCapture = &respr + v.beginStartPacketCapture.add(req, beginStartPacketCapture) + } + + resp, err := server.PollerResponderNext(beginStartPacketCapture, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginStartPacketCapture.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginStartPacketCapture) { + v.beginStartPacketCapture.remove(req) + } + + return resp, nil +} + +func (v *VPNGatewaysServerTransport) dispatchBeginStopPacketCapture(req *http.Request) (*http.Response, error) { + if v.srv.BeginStopPacketCapture == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginStopPacketCapture not implemented")} + } + beginStopPacketCapture := v.beginStopPacketCapture.get(req) + if beginStopPacketCapture == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/stoppacketcapture` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VPNGatewayPacketCaptureStopParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + var options *armnetwork.VPNGatewaysClientBeginStopPacketCaptureOptions + if !reflect.ValueOf(body).IsZero() { + options = &armnetwork.VPNGatewaysClientBeginStopPacketCaptureOptions{ + Parameters: &body, + } + } + respr, errRespr := v.srv.BeginStopPacketCapture(req.Context(), resourceGroupNameParam, gatewayNameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginStopPacketCapture = &respr + v.beginStopPacketCapture.add(req, beginStopPacketCapture) + } + + resp, err := server.PollerResponderNext(beginStopPacketCapture, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginStopPacketCapture.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginStopPacketCapture) { + v.beginStopPacketCapture.remove(req) + } + + return resp, nil +} + +func (v *VPNGatewaysServerTransport) dispatchBeginUpdateTags(req *http.Request) (*http.Response, error) { + if v.srv.BeginUpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdateTags not implemented")} + } + beginUpdateTags := v.beginUpdateTags.get(req) + if beginUpdateTags == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginUpdateTags(req.Context(), resourceGroupNameParam, gatewayNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdateTags = &respr + v.beginUpdateTags.add(req, beginUpdateTags) + } + + resp, err := server.PollerResponderNext(beginUpdateTags, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginUpdateTags.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdateTags) { + v.beginUpdateTags.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpnlinkconnections_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpnlinkconnections_server.go new file mode 100644 index 00000000000..86bc4241634 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpnlinkconnections_server.go @@ -0,0 +1,237 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// VPNLinkConnectionsServer is a fake server for instances of the armnetwork.VPNLinkConnectionsClient type. 
+type VPNLinkConnectionsServer struct { + // BeginGetIkeSas is the fake for method VPNLinkConnectionsClient.BeginGetIkeSas + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGetIkeSas func(ctx context.Context, resourceGroupName string, gatewayName string, connectionName string, linkConnectionName string, options *armnetwork.VPNLinkConnectionsClientBeginGetIkeSasOptions) (resp azfake.PollerResponder[armnetwork.VPNLinkConnectionsClientGetIkeSasResponse], errResp azfake.ErrorResponder) + + // NewListByVPNConnectionPager is the fake for method VPNLinkConnectionsClient.NewListByVPNConnectionPager + // HTTP status codes to indicate success: http.StatusOK + NewListByVPNConnectionPager func(resourceGroupName string, gatewayName string, connectionName string, options *armnetwork.VPNLinkConnectionsClientListByVPNConnectionOptions) (resp azfake.PagerResponder[armnetwork.VPNLinkConnectionsClientListByVPNConnectionResponse]) + + // BeginResetConnection is the fake for method VPNLinkConnectionsClient.BeginResetConnection + // HTTP status codes to indicate success: http.StatusAccepted + BeginResetConnection func(ctx context.Context, resourceGroupName string, gatewayName string, connectionName string, linkConnectionName string, options *armnetwork.VPNLinkConnectionsClientBeginResetConnectionOptions) (resp azfake.PollerResponder[armnetwork.VPNLinkConnectionsClientResetConnectionResponse], errResp azfake.ErrorResponder) +} + +// NewVPNLinkConnectionsServerTransport creates a new instance of VPNLinkConnectionsServerTransport with the provided implementation. +// The returned VPNLinkConnectionsServerTransport instance is connected to an instance of armnetwork.VPNLinkConnectionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVPNLinkConnectionsServerTransport(srv *VPNLinkConnectionsServer) *VPNLinkConnectionsServerTransport { + return &VPNLinkConnectionsServerTransport{ + srv: srv, + beginGetIkeSas: newTracker[azfake.PollerResponder[armnetwork.VPNLinkConnectionsClientGetIkeSasResponse]](), + newListByVPNConnectionPager: newTracker[azfake.PagerResponder[armnetwork.VPNLinkConnectionsClientListByVPNConnectionResponse]](), + beginResetConnection: newTracker[azfake.PollerResponder[armnetwork.VPNLinkConnectionsClientResetConnectionResponse]](), + } +} + +// VPNLinkConnectionsServerTransport connects instances of armnetwork.VPNLinkConnectionsClient to instances of VPNLinkConnectionsServer. +// Don't use this type directly, use NewVPNLinkConnectionsServerTransport instead. +type VPNLinkConnectionsServerTransport struct { + srv *VPNLinkConnectionsServer + beginGetIkeSas *tracker[azfake.PollerResponder[armnetwork.VPNLinkConnectionsClientGetIkeSasResponse]] + newListByVPNConnectionPager *tracker[azfake.PagerResponder[armnetwork.VPNLinkConnectionsClientListByVPNConnectionResponse]] + beginResetConnection *tracker[azfake.PollerResponder[armnetwork.VPNLinkConnectionsClientResetConnectionResponse]] +} + +// Do implements the policy.Transporter interface for VPNLinkConnectionsServerTransport. 
+func (v *VPNLinkConnectionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VPNLinkConnectionsClient.BeginGetIkeSas": + resp, err = v.dispatchBeginGetIkeSas(req) + case "VPNLinkConnectionsClient.NewListByVPNConnectionPager": + resp, err = v.dispatchNewListByVPNConnectionPager(req) + case "VPNLinkConnectionsClient.BeginResetConnection": + resp, err = v.dispatchBeginResetConnection(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VPNLinkConnectionsServerTransport) dispatchBeginGetIkeSas(req *http.Request) (*http.Response, error) { + if v.srv.BeginGetIkeSas == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGetIkeSas not implemented")} + } + beginGetIkeSas := v.beginGetIkeSas.get(req) + if beginGetIkeSas == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/vpnConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/vpnLinkConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/getikesas` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + linkConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("linkConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginGetIkeSas(req.Context(), resourceGroupNameParam, gatewayNameParam, connectionNameParam, linkConnectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGetIkeSas = &respr + v.beginGetIkeSas.add(req, beginGetIkeSas) + } + + resp, err := server.PollerResponderNext(beginGetIkeSas, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginGetIkeSas.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGetIkeSas) { + v.beginGetIkeSas.remove(req) + } + + return resp, nil +} + +func (v *VPNLinkConnectionsServerTransport) dispatchNewListByVPNConnectionPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListByVPNConnectionPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByVPNConnectionPager not implemented")} + } + newListByVPNConnectionPager := v.newListByVPNConnectionPager.get(req) + if newListByVPNConnectionPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/vpnConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/vpnLinkConnections` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListByVPNConnectionPager(resourceGroupNameParam, gatewayNameParam, connectionNameParam, nil) + newListByVPNConnectionPager = &resp + v.newListByVPNConnectionPager.add(req, newListByVPNConnectionPager) + server.PagerResponderInjectNextLinks(newListByVPNConnectionPager, req, func(page *armnetwork.VPNLinkConnectionsClientListByVPNConnectionResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByVPNConnectionPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListByVPNConnectionPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByVPNConnectionPager) { + v.newListByVPNConnectionPager.remove(req) + } + return resp, nil +} + +func (v *VPNLinkConnectionsServerTransport) dispatchBeginResetConnection(req *http.Request) (*http.Response, error) { + if v.srv.BeginResetConnection == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginResetConnection not implemented")} + } + beginResetConnection := v.beginResetConnection.get(req) + if beginResetConnection == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/vpnConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/vpnLinkConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resetconnection` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + linkConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("linkConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginResetConnection(req.Context(), resourceGroupNameParam, gatewayNameParam, connectionNameParam, linkConnectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginResetConnection = &respr + v.beginResetConnection.add(req, beginResetConnection) + } + + resp, err := server.PollerResponderNext(beginResetConnection, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusAccepted}, resp.StatusCode) { + v.beginResetConnection.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginResetConnection) { + v.beginResetConnection.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpnserverconfigurations_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpnserverconfigurations_server.go new file mode 100644 index 00000000000..d4cbe14a7d9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpnserverconfigurations_server.go @@ -0,0 +1,340 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// VPNServerConfigurationsServer is a fake server for instances of the armnetwork.VPNServerConfigurationsClient type. +type VPNServerConfigurationsServer struct { + // BeginCreateOrUpdate is the fake for method VPNServerConfigurationsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, vpnServerConfigurationName string, vpnServerConfigurationParameters armnetwork.VPNServerConfiguration, options *armnetwork.VPNServerConfigurationsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.VPNServerConfigurationsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VPNServerConfigurationsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, vpnServerConfigurationName string, options *armnetwork.VPNServerConfigurationsClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.VPNServerConfigurationsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VPNServerConfigurationsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, vpnServerConfigurationName string, options *armnetwork.VPNServerConfigurationsClientGetOptions) (resp azfake.Responder[armnetwork.VPNServerConfigurationsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method VPNServerConfigurationsClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armnetwork.VPNServerConfigurationsClientListOptions) (resp azfake.PagerResponder[armnetwork.VPNServerConfigurationsClientListResponse]) + + // NewListByResourceGroupPager is the fake for method VPNServerConfigurationsClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armnetwork.VPNServerConfigurationsClientListByResourceGroupOptions) (resp azfake.PagerResponder[armnetwork.VPNServerConfigurationsClientListByResourceGroupResponse]) + + // UpdateTags is the fake for method VPNServerConfigurationsClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, vpnServerConfigurationName string, vpnServerConfigurationParameters armnetwork.TagsObject, options *armnetwork.VPNServerConfigurationsClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.VPNServerConfigurationsClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewVPNServerConfigurationsServerTransport creates a new instance of VPNServerConfigurationsServerTransport with the provided implementation. 
+// The returned VPNServerConfigurationsServerTransport instance is connected to an instance of armnetwork.VPNServerConfigurationsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVPNServerConfigurationsServerTransport(srv *VPNServerConfigurationsServer) *VPNServerConfigurationsServerTransport { + return &VPNServerConfigurationsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.VPNServerConfigurationsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.VPNServerConfigurationsClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.VPNServerConfigurationsClientListResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armnetwork.VPNServerConfigurationsClientListByResourceGroupResponse]](), + } +} + +// VPNServerConfigurationsServerTransport connects instances of armnetwork.VPNServerConfigurationsClient to instances of VPNServerConfigurationsServer. +// Don't use this type directly, use NewVPNServerConfigurationsServerTransport instead. +type VPNServerConfigurationsServerTransport struct { + srv *VPNServerConfigurationsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.VPNServerConfigurationsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.VPNServerConfigurationsClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.VPNServerConfigurationsClientListResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armnetwork.VPNServerConfigurationsClientListByResourceGroupResponse]] +} + +// Do implements the policy.Transporter interface for VPNServerConfigurationsServerTransport. 
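// Illustrative sketch of faking NewListPager above. It assumes azfake.PagerResponder's
// AddPage helper from the azcore fake package; the zero-valued page is a placeholder a
// real test would populate.
//
//	srv := fake.VPNServerConfigurationsServer{
//		NewListPager: func(_ *armnetwork.VPNServerConfigurationsClientListOptions) (resp azfake.PagerResponder[armnetwork.VPNServerConfigurationsClientListResponse]) {
//			var page armnetwork.VPNServerConfigurationsClientListResponse // placeholder; fill page.Value in a real test
//			// Two pages: the NewListPager dispatcher below injects the nextLink between
//			// them via server.PagerResponderInjectNextLinks.
//			resp.AddPage(http.StatusOK, page, nil)
//			resp.AddPage(http.StatusOK, page, nil)
//			return
//		},
//	}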
+func (v *VPNServerConfigurationsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VPNServerConfigurationsClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VPNServerConfigurationsClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VPNServerConfigurationsClient.Get": + resp, err = v.dispatchGet(req) + case "VPNServerConfigurationsClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + case "VPNServerConfigurationsClient.NewListByResourceGroupPager": + resp, err = v.dispatchNewListByResourceGroupPager(req) + case "VPNServerConfigurationsClient.UpdateTags": + resp, err = v.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VPNServerConfigurationsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnServerConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VPNServerConfiguration](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vpnServerConfigurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vpnServerConfigurationName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, vpnServerConfigurationNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VPNServerConfigurationsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnServerConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vpnServerConfigurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vpnServerConfigurationName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, vpnServerConfigurationNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VPNServerConfigurationsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnServerConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vpnServerConfigurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vpnServerConfigurationName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, vpnServerConfigurationNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VPNServerConfiguration, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VPNServerConfigurationsServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := v.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnServerConfigurations` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := v.srv.NewListPager(nil) + newListPager = &resp + v.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.VPNServerConfigurationsClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + v.newListPager.remove(req) + } + return resp, nil +} + +func (v *VPNServerConfigurationsServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := v.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnServerConfigurations` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + v.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armnetwork.VPNServerConfigurationsClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + v.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (v *VPNServerConfigurationsServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if v.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnServerConfigurations/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vpnServerConfigurationNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vpnServerConfigurationName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.UpdateTags(req.Context(), resourceGroupNameParam, vpnServerConfigurationNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VPNServerConfiguration, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpnserverconfigurationsassociatedwithvirtualwan_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpnserverconfigurationsassociatedwithvirtualwan_server.go new file mode 100644 index 00000000000..191d98e2a2c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpnserverconfigurationsassociatedwithvirtualwan_server.go @@ -0,0 +1,115 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// VPNServerConfigurationsAssociatedWithVirtualWanServer is a fake server for instances of the armnetwork.VPNServerConfigurationsAssociatedWithVirtualWanClient type. 
+type VPNServerConfigurationsAssociatedWithVirtualWanServer struct { + // BeginList is the fake for method VPNServerConfigurationsAssociatedWithVirtualWanClient.BeginList + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginList func(ctx context.Context, resourceGroupName string, virtualWANName string, options *armnetwork.VPNServerConfigurationsAssociatedWithVirtualWanClientBeginListOptions) (resp azfake.PollerResponder[armnetwork.VPNServerConfigurationsAssociatedWithVirtualWanClientListResponse], errResp azfake.ErrorResponder) +} + +// NewVPNServerConfigurationsAssociatedWithVirtualWanServerTransport creates a new instance of VPNServerConfigurationsAssociatedWithVirtualWanServerTransport with the provided implementation. +// The returned VPNServerConfigurationsAssociatedWithVirtualWanServerTransport instance is connected to an instance of armnetwork.VPNServerConfigurationsAssociatedWithVirtualWanClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVPNServerConfigurationsAssociatedWithVirtualWanServerTransport(srv *VPNServerConfigurationsAssociatedWithVirtualWanServer) *VPNServerConfigurationsAssociatedWithVirtualWanServerTransport { + return &VPNServerConfigurationsAssociatedWithVirtualWanServerTransport{ + srv: srv, + beginList: newTracker[azfake.PollerResponder[armnetwork.VPNServerConfigurationsAssociatedWithVirtualWanClientListResponse]](), + } +} + +// VPNServerConfigurationsAssociatedWithVirtualWanServerTransport connects instances of armnetwork.VPNServerConfigurationsAssociatedWithVirtualWanClient to instances of VPNServerConfigurationsAssociatedWithVirtualWanServer. +// Don't use this type directly, use NewVPNServerConfigurationsAssociatedWithVirtualWanServerTransport instead. +type VPNServerConfigurationsAssociatedWithVirtualWanServerTransport struct { + srv *VPNServerConfigurationsAssociatedWithVirtualWanServer + beginList *tracker[azfake.PollerResponder[armnetwork.VPNServerConfigurationsAssociatedWithVirtualWanClientListResponse]] +} + +// Do implements the policy.Transporter interface for VPNServerConfigurationsAssociatedWithVirtualWanServerTransport. 
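// Illustrative sketch of the error path for BeginList above, assuming
// azfake.ErrorResponder's SetResponseError helper from the azcore fake package
// (the error code string is a placeholder):
//
//	srv := fake.VPNServerConfigurationsAssociatedWithVirtualWanServer{
//		BeginList: func(ctx context.Context, rg, wanName string,
//			_ *armnetwork.VPNServerConfigurationsAssociatedWithVirtualWanClientBeginListOptions) (
//			resp azfake.PollerResponder[armnetwork.VPNServerConfigurationsAssociatedWithVirtualWanClientListResponse], errResp azfake.ErrorResponder) {
//			// Simulated ARM failure; the client should surface it as an *azcore.ResponseError.
//			errResp.SetResponseError(http.StatusConflict, "SimulatedConflict")
//			return
//		},
//	}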
+func (v *VPNServerConfigurationsAssociatedWithVirtualWanServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VPNServerConfigurationsAssociatedWithVirtualWanClient.BeginList": + resp, err = v.dispatchBeginList(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VPNServerConfigurationsAssociatedWithVirtualWanServerTransport) dispatchBeginList(req *http.Request) (*http.Response, error) { + if v.srv.BeginList == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginList not implemented")} + } + beginList := v.beginList.get(req) + if beginList == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualWans/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/vpnServerConfigurations` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + virtualWANNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualWANName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginList(req.Context(), resourceGroupNameParam, virtualWANNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginList = &respr + v.beginList.add(req, beginList) + } + + resp, err := server.PollerResponderNext(beginList, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginList.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginList) { + v.beginList.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpnsitelinkconnections_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpnsitelinkconnections_server.go new file mode 100644 index 00000000000..5f43001b1c7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpnsitelinkconnections_server.go @@ -0,0 +1,108 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// VPNSiteLinkConnectionsServer is a fake server for instances of the armnetwork.VPNSiteLinkConnectionsClient type. +type VPNSiteLinkConnectionsServer struct { + // Get is the fake for method VPNSiteLinkConnectionsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, gatewayName string, connectionName string, linkConnectionName string, options *armnetwork.VPNSiteLinkConnectionsClientGetOptions) (resp azfake.Responder[armnetwork.VPNSiteLinkConnectionsClientGetResponse], errResp azfake.ErrorResponder) +} + +// NewVPNSiteLinkConnectionsServerTransport creates a new instance of VPNSiteLinkConnectionsServerTransport with the provided implementation. +// The returned VPNSiteLinkConnectionsServerTransport instance is connected to an instance of armnetwork.VPNSiteLinkConnectionsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVPNSiteLinkConnectionsServerTransport(srv *VPNSiteLinkConnectionsServer) *VPNSiteLinkConnectionsServerTransport { + return &VPNSiteLinkConnectionsServerTransport{srv: srv} +} + +// VPNSiteLinkConnectionsServerTransport connects instances of armnetwork.VPNSiteLinkConnectionsClient to instances of VPNSiteLinkConnectionsServer. +// Don't use this type directly, use NewVPNSiteLinkConnectionsServerTransport instead. +type VPNSiteLinkConnectionsServerTransport struct { + srv *VPNSiteLinkConnectionsServer +} + +// Do implements the policy.Transporter interface for VPNSiteLinkConnectionsServerTransport. 
+func (v *VPNSiteLinkConnectionsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VPNSiteLinkConnectionsClient.Get": + resp, err = v.dispatchGet(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VPNSiteLinkConnectionsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnGateways/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/vpnConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/vpnLinkConnections/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 5 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + gatewayNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("gatewayName")]) + if err != nil { + return nil, err + } + connectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("connectionName")]) + if err != nil { + return nil, err + } + linkConnectionNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("linkConnectionName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, gatewayNameParam, connectionNameParam, linkConnectionNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VPNSiteLinkConnection, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpnsitelinks_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpnsitelinks_server.go new file mode 100644 index 00000000000..f4506c0b187 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpnsitelinks_server.go @@ -0,0 +1,156 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// VPNSiteLinksServer is a fake server for instances of the armnetwork.VPNSiteLinksClient type. +type VPNSiteLinksServer struct { + // Get is the fake for method VPNSiteLinksClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, vpnSiteName string, vpnSiteLinkName string, options *armnetwork.VPNSiteLinksClientGetOptions) (resp azfake.Responder[armnetwork.VPNSiteLinksClientGetResponse], errResp azfake.ErrorResponder) + + // NewListByVPNSitePager is the fake for method VPNSiteLinksClient.NewListByVPNSitePager + // HTTP status codes to indicate success: http.StatusOK + NewListByVPNSitePager func(resourceGroupName string, vpnSiteName string, options *armnetwork.VPNSiteLinksClientListByVPNSiteOptions) (resp azfake.PagerResponder[armnetwork.VPNSiteLinksClientListByVPNSiteResponse]) +} + +// NewVPNSiteLinksServerTransport creates a new instance of VPNSiteLinksServerTransport with the provided implementation. +// The returned VPNSiteLinksServerTransport instance is connected to an instance of armnetwork.VPNSiteLinksClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVPNSiteLinksServerTransport(srv *VPNSiteLinksServer) *VPNSiteLinksServerTransport { + return &VPNSiteLinksServerTransport{ + srv: srv, + newListByVPNSitePager: newTracker[azfake.PagerResponder[armnetwork.VPNSiteLinksClientListByVPNSiteResponse]](), + } +} + +// VPNSiteLinksServerTransport connects instances of armnetwork.VPNSiteLinksClient to instances of VPNSiteLinksServer. +// Don't use this type directly, use NewVPNSiteLinksServerTransport instead. +type VPNSiteLinksServerTransport struct { + srv *VPNSiteLinksServer + newListByVPNSitePager *tracker[azfake.PagerResponder[armnetwork.VPNSiteLinksClientListByVPNSiteResponse]] +} + +// Do implements the policy.Transporter interface for VPNSiteLinksServerTransport. 
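// Illustrative sketch of faking the synchronous Get above, assuming azfake.Responder's
// SetResponse helper from the azcore fake package (the returned link name is a placeholder):
//
//	srv := fake.VPNSiteLinksServer{
//		Get: func(ctx context.Context, rg, vpnSiteName, vpnSiteLinkName string,
//			_ *armnetwork.VPNSiteLinksClientGetOptions) (
//			resp azfake.Responder[armnetwork.VPNSiteLinksClientGetResponse], errResp azfake.ErrorResponder) {
//			// Non-LRO API: a single 200 with the payload is all the dispatcher expects.
//			resp.SetResponse(http.StatusOK, armnetwork.VPNSiteLinksClientGetResponse{
//				VPNSiteLink: armnetwork.VPNSiteLink{Name: to.Ptr(vpnSiteLinkName)},
//			}, nil)
//			return
//		},
//	}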
+func (v *VPNSiteLinksServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VPNSiteLinksClient.Get": + resp, err = v.dispatchGet(req) + case "VPNSiteLinksClient.NewListByVPNSitePager": + resp, err = v.dispatchNewListByVPNSitePager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VPNSiteLinksServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnSites/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/vpnSiteLinks/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vpnSiteNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vpnSiteName")]) + if err != nil { + return nil, err + } + vpnSiteLinkNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vpnSiteLinkName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, vpnSiteNameParam, vpnSiteLinkNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VPNSiteLink, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VPNSiteLinksServerTransport) dispatchNewListByVPNSitePager(req *http.Request) (*http.Response, error) { + if v.srv.NewListByVPNSitePager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByVPNSitePager not implemented")} + } + newListByVPNSitePager := v.newListByVPNSitePager.get(req) + if newListByVPNSitePager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnSites/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/vpnSiteLinks` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vpnSiteNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vpnSiteName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListByVPNSitePager(resourceGroupNameParam, vpnSiteNameParam, nil) + newListByVPNSitePager = &resp + v.newListByVPNSitePager.add(req, newListByVPNSitePager) + server.PagerResponderInjectNextLinks(newListByVPNSitePager, req, func(page *armnetwork.VPNSiteLinksClientListByVPNSiteResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByVPNSitePager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListByVPNSitePager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByVPNSitePager) { + v.newListByVPNSitePager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpnsites_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpnsites_server.go new file mode 100644 index 00000000000..7d458a59870 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpnsites_server.go @@ -0,0 +1,340 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// VPNSitesServer is a fake server for instances of the armnetwork.VPNSitesClient type. 
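+// An illustrative sketch (not part of the generated code; the subscription ID and returned values
+// are placeholders, and the usual azcore, azcore/arm, azcore/to and azcore/fake imports are assumed)
+// of how a fake server of this kind might be wired into a client through the
+// azcore.ClientOptions.Transporter field described above:
+//
+//	srv := fake.VPNSitesServer{
+//		Get: func(ctx context.Context, resourceGroupName, vpnSiteName string, _ *armnetwork.VPNSitesClientGetOptions) (resp azfake.Responder[armnetwork.VPNSitesClientGetResponse], errResp azfake.ErrorResponder) {
+//			// Return a canned 200 response echoing the requested site name.
+//			resp.SetResponse(http.StatusOK, armnetwork.VPNSitesClientGetResponse{VPNSite: armnetwork.VPNSite{Name: to.Ptr(vpnSiteName)}}, nil)
+//			return
+//		},
+//	}
+//	client, _ := armnetwork.NewVPNSitesClient("<subscription-id>", &azfake.TokenCredential{}, &arm.ClientOptions{
+//		ClientOptions: azcore.ClientOptions{Transporter: fake.NewVPNSitesServerTransport(&srv)},
+//	})
+//	_, _ = client.Get(context.Background(), "rg", "site", nil)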
+type VPNSitesServer struct { + // BeginCreateOrUpdate is the fake for method VPNSitesClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, vpnSiteName string, vpnSiteParameters armnetwork.VPNSite, options *armnetwork.VPNSitesClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armnetwork.VPNSitesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method VPNSitesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, vpnSiteName string, options *armnetwork.VPNSitesClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.VPNSitesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method VPNSitesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, vpnSiteName string, options *armnetwork.VPNSitesClientGetOptions) (resp azfake.Responder[armnetwork.VPNSitesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method VPNSitesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(options *armnetwork.VPNSitesClientListOptions) (resp azfake.PagerResponder[armnetwork.VPNSitesClientListResponse]) + + // NewListByResourceGroupPager is the fake for method VPNSitesClient.NewListByResourceGroupPager + // HTTP status codes to indicate success: http.StatusOK + NewListByResourceGroupPager func(resourceGroupName string, options *armnetwork.VPNSitesClientListByResourceGroupOptions) (resp azfake.PagerResponder[armnetwork.VPNSitesClientListByResourceGroupResponse]) + + // UpdateTags is the fake for method VPNSitesClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, vpnSiteName string, vpnSiteParameters armnetwork.TagsObject, options *armnetwork.VPNSitesClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.VPNSitesClientUpdateTagsResponse], errResp azfake.ErrorResponder) +} + +// NewVPNSitesServerTransport creates a new instance of VPNSitesServerTransport with the provided implementation. +// The returned VPNSitesServerTransport instance is connected to an instance of armnetwork.VPNSitesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewVPNSitesServerTransport(srv *VPNSitesServer) *VPNSitesServerTransport { + return &VPNSitesServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armnetwork.VPNSitesClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.VPNSitesClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.VPNSitesClientListResponse]](), + newListByResourceGroupPager: newTracker[azfake.PagerResponder[armnetwork.VPNSitesClientListByResourceGroupResponse]](), + } +} + +// VPNSitesServerTransport connects instances of armnetwork.VPNSitesClient to instances of VPNSitesServer. +// Don't use this type directly, use NewVPNSitesServerTransport instead. 
+type VPNSitesServerTransport struct { + srv *VPNSitesServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armnetwork.VPNSitesClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.VPNSitesClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.VPNSitesClientListResponse]] + newListByResourceGroupPager *tracker[azfake.PagerResponder[armnetwork.VPNSitesClientListByResourceGroupResponse]] +} + +// Do implements the policy.Transporter interface for VPNSitesServerTransport. +func (v *VPNSitesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "VPNSitesClient.BeginCreateOrUpdate": + resp, err = v.dispatchBeginCreateOrUpdate(req) + case "VPNSitesClient.BeginDelete": + resp, err = v.dispatchBeginDelete(req) + case "VPNSitesClient.Get": + resp, err = v.dispatchGet(req) + case "VPNSitesClient.NewListPager": + resp, err = v.dispatchNewListPager(req) + case "VPNSitesClient.NewListByResourceGroupPager": + resp, err = v.dispatchNewListByResourceGroupPager(req) + case "VPNSitesClient.UpdateTags": + resp, err = v.dispatchUpdateTags(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (v *VPNSitesServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if v.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := v.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnSites/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VPNSite](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vpnSiteNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vpnSiteName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, vpnSiteNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + v.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + v.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + v.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (v *VPNSitesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if v.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := v.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnSites/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vpnSiteNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vpnSiteName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.BeginDelete(req.Context(), resourceGroupNameParam, vpnSiteNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + v.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + v.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + v.beginDelete.remove(req) + } + + return resp, nil +} + +func (v *VPNSitesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if v.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnSites/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vpnSiteNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vpnSiteName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.Get(req.Context(), resourceGroupNameParam, vpnSiteNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VPNSite, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (v *VPNSitesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := v.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnSites` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := v.srv.NewListPager(nil) + newListPager = &resp + v.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.VPNSitesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + v.newListPager.remove(req) + } + return resp, nil +} + +func (v *VPNSitesServerTransport) dispatchNewListByResourceGroupPager(req *http.Request) (*http.Response, error) { + if v.srv.NewListByResourceGroupPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByResourceGroupPager not implemented")} + } + newListByResourceGroupPager := v.newListByResourceGroupPager.get(req) + if newListByResourceGroupPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnSites` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := v.srv.NewListByResourceGroupPager(resourceGroupNameParam, nil) + newListByResourceGroupPager = &resp + v.newListByResourceGroupPager.add(req, newListByResourceGroupPager) + server.PagerResponderInjectNextLinks(newListByResourceGroupPager, req, func(page *armnetwork.VPNSitesClientListByResourceGroupResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByResourceGroupPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + v.newListByResourceGroupPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByResourceGroupPager) { + v.newListByResourceGroupPager.remove(req) + } + return resp, nil +} + +func (v *VPNSitesServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if v.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/vpnSites/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + vpnSiteNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("vpnSiteName")]) + if err != nil { + return nil, err + } + respr, errRespr := v.srv.UpdateTags(req.Context(), resourceGroupNameParam, vpnSiteNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).VPNSite, req) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpnsitesconfiguration_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpnsitesconfiguration_server.go new file mode 100644 index 00000000000..3268ae3629c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/vpnsitesconfiguration_server.go @@ -0,0 +1,119 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// VPNSitesConfigurationServer is a fake server for instances of the armnetwork.VPNSitesConfigurationClient type. 
+type VPNSitesConfigurationServer struct {
+	// BeginDownload is the fake for method VPNSitesConfigurationClient.BeginDownload
+	// HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted
+	BeginDownload func(ctx context.Context, resourceGroupName string, virtualWANName string, request armnetwork.GetVPNSitesConfigurationRequest, options *armnetwork.VPNSitesConfigurationClientBeginDownloadOptions) (resp azfake.PollerResponder[armnetwork.VPNSitesConfigurationClientDownloadResponse], errResp azfake.ErrorResponder)
+}
+
+// NewVPNSitesConfigurationServerTransport creates a new instance of VPNSitesConfigurationServerTransport with the provided implementation.
+// The returned VPNSitesConfigurationServerTransport instance is connected to an instance of armnetwork.VPNSitesConfigurationClient via the
+// azcore.ClientOptions.Transporter field in the client's constructor parameters.
+func NewVPNSitesConfigurationServerTransport(srv *VPNSitesConfigurationServer) *VPNSitesConfigurationServerTransport {
+	return &VPNSitesConfigurationServerTransport{
+		srv:           srv,
+		beginDownload: newTracker[azfake.PollerResponder[armnetwork.VPNSitesConfigurationClientDownloadResponse]](),
+	}
+}
+
+// VPNSitesConfigurationServerTransport connects instances of armnetwork.VPNSitesConfigurationClient to instances of VPNSitesConfigurationServer.
+// Don't use this type directly, use NewVPNSitesConfigurationServerTransport instead.
+type VPNSitesConfigurationServerTransport struct {
+	srv           *VPNSitesConfigurationServer
+	beginDownload *tracker[azfake.PollerResponder[armnetwork.VPNSitesConfigurationClientDownloadResponse]]
+}
+
+// Do implements the policy.Transporter interface for VPNSitesConfigurationServerTransport.
+func (v *VPNSitesConfigurationServerTransport) Do(req *http.Request) (*http.Response, error) {
+	rawMethod := req.Context().Value(runtime.CtxAPINameKey{})
+	method, ok := rawMethod.(string)
+	if !ok {
+		return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")}
+	}
+
+	var resp *http.Response
+	var err error
+
+	switch method {
+	case "VPNSitesConfigurationClient.BeginDownload":
+		resp, err = v.dispatchBeginDownload(req)
+	default:
+		err = fmt.Errorf("unhandled API %s", method)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return resp, nil
+}
+
+func (v *VPNSitesConfigurationServerTransport) dispatchBeginDownload(req *http.Request) (*http.Response, error) {
+	if v.srv.BeginDownload == nil {
+		return nil, &nonRetriableError{errors.New("fake for method BeginDownload not implemented")}
+	}
+	beginDownload := v.beginDownload.get(req)
+	if beginDownload == nil {
+		const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/virtualWans/(?P<virtualWANName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/vpnConfiguration`
+		regex := regexp.MustCompile(regexStr)
+		matches := regex.FindStringSubmatch(req.URL.EscapedPath())
+		if matches == nil || len(matches) < 3 {
+			return nil, fmt.Errorf("failed to parse path %s", req.URL.Path)
+		}
+		body, err := server.UnmarshalRequestAsJSON[armnetwork.GetVPNSitesConfigurationRequest](req)
+		if err != nil {
+			return nil, err
+		}
+		resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")])
+		if err != nil {
+			return nil, err
+		}
+		virtualWANNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("virtualWANName")])
+		if err != nil {
+			return nil, err
+		}
+		respr, errRespr := v.srv.BeginDownload(req.Context(),
resourceGroupNameParam, virtualWANNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDownload = &respr + v.beginDownload.add(req, beginDownload) + } + + resp, err := server.PollerResponderNext(beginDownload, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + v.beginDownload.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDownload) { + v.beginDownload.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/watchers_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/watchers_server.go new file mode 100644 index 00000000000..64d71e37e11 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/watchers_server.go @@ -0,0 +1,979 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// WatchersServer is a fake server for instances of the armnetwork.WatchersClient type. 
+type WatchersServer struct { + // BeginCheckConnectivity is the fake for method WatchersClient.BeginCheckConnectivity + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginCheckConnectivity func(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters armnetwork.ConnectivityParameters, options *armnetwork.WatchersClientBeginCheckConnectivityOptions) (resp azfake.PollerResponder[armnetwork.WatchersClientCheckConnectivityResponse], errResp azfake.ErrorResponder) + + // CreateOrUpdate is the fake for method WatchersClient.CreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + CreateOrUpdate func(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters armnetwork.Watcher, options *armnetwork.WatchersClientCreateOrUpdateOptions) (resp azfake.Responder[armnetwork.WatchersClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method WatchersClient.BeginDelete + // HTTP status codes to indicate success: http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, networkWatcherName string, options *armnetwork.WatchersClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.WatchersClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method WatchersClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, networkWatcherName string, options *armnetwork.WatchersClientGetOptions) (resp azfake.Responder[armnetwork.WatchersClientGetResponse], errResp azfake.ErrorResponder) + + // BeginGetAzureReachabilityReport is the fake for method WatchersClient.BeginGetAzureReachabilityReport + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGetAzureReachabilityReport func(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters armnetwork.AzureReachabilityReportParameters, options *armnetwork.WatchersClientBeginGetAzureReachabilityReportOptions) (resp azfake.PollerResponder[armnetwork.WatchersClientGetAzureReachabilityReportResponse], errResp azfake.ErrorResponder) + + // BeginGetFlowLogStatus is the fake for method WatchersClient.BeginGetFlowLogStatus + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGetFlowLogStatus func(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters armnetwork.FlowLogStatusParameters, options *armnetwork.WatchersClientBeginGetFlowLogStatusOptions) (resp azfake.PollerResponder[armnetwork.WatchersClientGetFlowLogStatusResponse], errResp azfake.ErrorResponder) + + // BeginGetNetworkConfigurationDiagnostic is the fake for method WatchersClient.BeginGetNetworkConfigurationDiagnostic + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGetNetworkConfigurationDiagnostic func(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters armnetwork.ConfigurationDiagnosticParameters, options *armnetwork.WatchersClientBeginGetNetworkConfigurationDiagnosticOptions) (resp azfake.PollerResponder[armnetwork.WatchersClientGetNetworkConfigurationDiagnosticResponse], errResp azfake.ErrorResponder) + + // BeginGetNextHop is the fake for method WatchersClient.BeginGetNextHop + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGetNextHop func(ctx context.Context, 
resourceGroupName string, networkWatcherName string, parameters armnetwork.NextHopParameters, options *armnetwork.WatchersClientBeginGetNextHopOptions) (resp azfake.PollerResponder[armnetwork.WatchersClientGetNextHopResponse], errResp azfake.ErrorResponder) + + // GetTopology is the fake for method WatchersClient.GetTopology + // HTTP status codes to indicate success: http.StatusOK + GetTopology func(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters armnetwork.TopologyParameters, options *armnetwork.WatchersClientGetTopologyOptions) (resp azfake.Responder[armnetwork.WatchersClientGetTopologyResponse], errResp azfake.ErrorResponder) + + // BeginGetTroubleshooting is the fake for method WatchersClient.BeginGetTroubleshooting + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGetTroubleshooting func(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters armnetwork.TroubleshootingParameters, options *armnetwork.WatchersClientBeginGetTroubleshootingOptions) (resp azfake.PollerResponder[armnetwork.WatchersClientGetTroubleshootingResponse], errResp azfake.ErrorResponder) + + // BeginGetTroubleshootingResult is the fake for method WatchersClient.BeginGetTroubleshootingResult + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGetTroubleshootingResult func(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters armnetwork.QueryTroubleshootingParameters, options *armnetwork.WatchersClientBeginGetTroubleshootingResultOptions) (resp azfake.PollerResponder[armnetwork.WatchersClientGetTroubleshootingResultResponse], errResp azfake.ErrorResponder) + + // BeginGetVMSecurityRules is the fake for method WatchersClient.BeginGetVMSecurityRules + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginGetVMSecurityRules func(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters armnetwork.SecurityGroupViewParameters, options *armnetwork.WatchersClientBeginGetVMSecurityRulesOptions) (resp azfake.PollerResponder[armnetwork.WatchersClientGetVMSecurityRulesResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method WatchersClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armnetwork.WatchersClientListOptions) (resp azfake.PagerResponder[armnetwork.WatchersClientListResponse]) + + // NewListAllPager is the fake for method WatchersClient.NewListAllPager + // HTTP status codes to indicate success: http.StatusOK + NewListAllPager func(options *armnetwork.WatchersClientListAllOptions) (resp azfake.PagerResponder[armnetwork.WatchersClientListAllResponse]) + + // BeginListAvailableProviders is the fake for method WatchersClient.BeginListAvailableProviders + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginListAvailableProviders func(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters armnetwork.AvailableProvidersListParameters, options *armnetwork.WatchersClientBeginListAvailableProvidersOptions) (resp azfake.PollerResponder[armnetwork.WatchersClientListAvailableProvidersResponse], errResp azfake.ErrorResponder) + + // BeginSetFlowLogConfiguration is the fake for method WatchersClient.BeginSetFlowLogConfiguration + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginSetFlowLogConfiguration func(ctx 
context.Context, resourceGroupName string, networkWatcherName string, parameters armnetwork.FlowLogInformation, options *armnetwork.WatchersClientBeginSetFlowLogConfigurationOptions) (resp azfake.PollerResponder[armnetwork.WatchersClientSetFlowLogConfigurationResponse], errResp azfake.ErrorResponder) + + // UpdateTags is the fake for method WatchersClient.UpdateTags + // HTTP status codes to indicate success: http.StatusOK + UpdateTags func(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters armnetwork.TagsObject, options *armnetwork.WatchersClientUpdateTagsOptions) (resp azfake.Responder[armnetwork.WatchersClientUpdateTagsResponse], errResp azfake.ErrorResponder) + + // BeginVerifyIPFlow is the fake for method WatchersClient.BeginVerifyIPFlow + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginVerifyIPFlow func(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters armnetwork.VerificationIPFlowParameters, options *armnetwork.WatchersClientBeginVerifyIPFlowOptions) (resp azfake.PollerResponder[armnetwork.WatchersClientVerifyIPFlowResponse], errResp azfake.ErrorResponder) +} + +// NewWatchersServerTransport creates a new instance of WatchersServerTransport with the provided implementation. +// The returned WatchersServerTransport instance is connected to an instance of armnetwork.WatchersClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewWatchersServerTransport(srv *WatchersServer) *WatchersServerTransport { + return &WatchersServerTransport{ + srv: srv, + beginCheckConnectivity: newTracker[azfake.PollerResponder[armnetwork.WatchersClientCheckConnectivityResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armnetwork.WatchersClientDeleteResponse]](), + beginGetAzureReachabilityReport: newTracker[azfake.PollerResponder[armnetwork.WatchersClientGetAzureReachabilityReportResponse]](), + beginGetFlowLogStatus: newTracker[azfake.PollerResponder[armnetwork.WatchersClientGetFlowLogStatusResponse]](), + beginGetNetworkConfigurationDiagnostic: newTracker[azfake.PollerResponder[armnetwork.WatchersClientGetNetworkConfigurationDiagnosticResponse]](), + beginGetNextHop: newTracker[azfake.PollerResponder[armnetwork.WatchersClientGetNextHopResponse]](), + beginGetTroubleshooting: newTracker[azfake.PollerResponder[armnetwork.WatchersClientGetTroubleshootingResponse]](), + beginGetTroubleshootingResult: newTracker[azfake.PollerResponder[armnetwork.WatchersClientGetTroubleshootingResultResponse]](), + beginGetVMSecurityRules: newTracker[azfake.PollerResponder[armnetwork.WatchersClientGetVMSecurityRulesResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.WatchersClientListResponse]](), + newListAllPager: newTracker[azfake.PagerResponder[armnetwork.WatchersClientListAllResponse]](), + beginListAvailableProviders: newTracker[azfake.PollerResponder[armnetwork.WatchersClientListAvailableProvidersResponse]](), + beginSetFlowLogConfiguration: newTracker[azfake.PollerResponder[armnetwork.WatchersClientSetFlowLogConfigurationResponse]](), + beginVerifyIPFlow: newTracker[azfake.PollerResponder[armnetwork.WatchersClientVerifyIPFlowResponse]](), + } +} + +// WatchersServerTransport connects instances of armnetwork.WatchersClient to instances of WatchersServer. +// Don't use this type directly, use NewWatchersServerTransport instead. 
+type WatchersServerTransport struct { + srv *WatchersServer + beginCheckConnectivity *tracker[azfake.PollerResponder[armnetwork.WatchersClientCheckConnectivityResponse]] + beginDelete *tracker[azfake.PollerResponder[armnetwork.WatchersClientDeleteResponse]] + beginGetAzureReachabilityReport *tracker[azfake.PollerResponder[armnetwork.WatchersClientGetAzureReachabilityReportResponse]] + beginGetFlowLogStatus *tracker[azfake.PollerResponder[armnetwork.WatchersClientGetFlowLogStatusResponse]] + beginGetNetworkConfigurationDiagnostic *tracker[azfake.PollerResponder[armnetwork.WatchersClientGetNetworkConfigurationDiagnosticResponse]] + beginGetNextHop *tracker[azfake.PollerResponder[armnetwork.WatchersClientGetNextHopResponse]] + beginGetTroubleshooting *tracker[azfake.PollerResponder[armnetwork.WatchersClientGetTroubleshootingResponse]] + beginGetTroubleshootingResult *tracker[azfake.PollerResponder[armnetwork.WatchersClientGetTroubleshootingResultResponse]] + beginGetVMSecurityRules *tracker[azfake.PollerResponder[armnetwork.WatchersClientGetVMSecurityRulesResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.WatchersClientListResponse]] + newListAllPager *tracker[azfake.PagerResponder[armnetwork.WatchersClientListAllResponse]] + beginListAvailableProviders *tracker[azfake.PollerResponder[armnetwork.WatchersClientListAvailableProvidersResponse]] + beginSetFlowLogConfiguration *tracker[azfake.PollerResponder[armnetwork.WatchersClientSetFlowLogConfigurationResponse]] + beginVerifyIPFlow *tracker[azfake.PollerResponder[armnetwork.WatchersClientVerifyIPFlowResponse]] +} + +// Do implements the policy.Transporter interface for WatchersServerTransport. +func (w *WatchersServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "WatchersClient.BeginCheckConnectivity": + resp, err = w.dispatchBeginCheckConnectivity(req) + case "WatchersClient.CreateOrUpdate": + resp, err = w.dispatchCreateOrUpdate(req) + case "WatchersClient.BeginDelete": + resp, err = w.dispatchBeginDelete(req) + case "WatchersClient.Get": + resp, err = w.dispatchGet(req) + case "WatchersClient.BeginGetAzureReachabilityReport": + resp, err = w.dispatchBeginGetAzureReachabilityReport(req) + case "WatchersClient.BeginGetFlowLogStatus": + resp, err = w.dispatchBeginGetFlowLogStatus(req) + case "WatchersClient.BeginGetNetworkConfigurationDiagnostic": + resp, err = w.dispatchBeginGetNetworkConfigurationDiagnostic(req) + case "WatchersClient.BeginGetNextHop": + resp, err = w.dispatchBeginGetNextHop(req) + case "WatchersClient.GetTopology": + resp, err = w.dispatchGetTopology(req) + case "WatchersClient.BeginGetTroubleshooting": + resp, err = w.dispatchBeginGetTroubleshooting(req) + case "WatchersClient.BeginGetTroubleshootingResult": + resp, err = w.dispatchBeginGetTroubleshootingResult(req) + case "WatchersClient.BeginGetVMSecurityRules": + resp, err = w.dispatchBeginGetVMSecurityRules(req) + case "WatchersClient.NewListPager": + resp, err = w.dispatchNewListPager(req) + case "WatchersClient.NewListAllPager": + resp, err = w.dispatchNewListAllPager(req) + case "WatchersClient.BeginListAvailableProviders": + resp, err = w.dispatchBeginListAvailableProviders(req) + case "WatchersClient.BeginSetFlowLogConfiguration": + resp, err = 
w.dispatchBeginSetFlowLogConfiguration(req) + case "WatchersClient.UpdateTags": + resp, err = w.dispatchUpdateTags(req) + case "WatchersClient.BeginVerifyIPFlow": + resp, err = w.dispatchBeginVerifyIPFlow(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (w *WatchersServerTransport) dispatchBeginCheckConnectivity(req *http.Request) (*http.Response, error) { + if w.srv.BeginCheckConnectivity == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCheckConnectivity not implemented")} + } + beginCheckConnectivity := w.beginCheckConnectivity.get(req) + if beginCheckConnectivity == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/connectivityCheck` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.ConnectivityParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + respr, errRespr := w.srv.BeginCheckConnectivity(req.Context(), resourceGroupNameParam, networkWatcherNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCheckConnectivity = &respr + w.beginCheckConnectivity.add(req, beginCheckConnectivity) + } + + resp, err := server.PollerResponderNext(beginCheckConnectivity, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + w.beginCheckConnectivity.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCheckConnectivity) { + w.beginCheckConnectivity.remove(req) + } + + return resp, nil +} + +func (w *WatchersServerTransport) dispatchCreateOrUpdate(req *http.Request) (*http.Response, error) { + if w.srv.CreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method CreateOrUpdate not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.Watcher](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + respr, errRespr := w.srv.CreateOrUpdate(req.Context(), resourceGroupNameParam, networkWatcherNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusCreated}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusCreated", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Watcher, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (w *WatchersServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if w.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := w.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + respr, errRespr := w.srv.BeginDelete(req.Context(), resourceGroupNameParam, networkWatcherNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + w.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + w.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + w.beginDelete.remove(req) + } + + return resp, nil +} + +func (w *WatchersServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if w.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + respr, errRespr := w.srv.Get(req.Context(), resourceGroupNameParam, networkWatcherNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Watcher, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (w *WatchersServerTransport) dispatchBeginGetAzureReachabilityReport(req *http.Request) (*http.Response, error) { + if w.srv.BeginGetAzureReachabilityReport == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGetAzureReachabilityReport not implemented")} + } + beginGetAzureReachabilityReport := w.beginGetAzureReachabilityReport.get(req) + if beginGetAzureReachabilityReport == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/azureReachabilityReport` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.AzureReachabilityReportParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + respr, errRespr := w.srv.BeginGetAzureReachabilityReport(req.Context(), resourceGroupNameParam, networkWatcherNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGetAzureReachabilityReport = &respr + w.beginGetAzureReachabilityReport.add(req, beginGetAzureReachabilityReport) + } + + resp, err := server.PollerResponderNext(beginGetAzureReachabilityReport, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + 
w.beginGetAzureReachabilityReport.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGetAzureReachabilityReport) { + w.beginGetAzureReachabilityReport.remove(req) + } + + return resp, nil +} + +func (w *WatchersServerTransport) dispatchBeginGetFlowLogStatus(req *http.Request) (*http.Response, error) { + if w.srv.BeginGetFlowLogStatus == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGetFlowLogStatus not implemented")} + } + beginGetFlowLogStatus := w.beginGetFlowLogStatus.get(req) + if beginGetFlowLogStatus == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/queryFlowLogStatus` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.FlowLogStatusParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + respr, errRespr := w.srv.BeginGetFlowLogStatus(req.Context(), resourceGroupNameParam, networkWatcherNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGetFlowLogStatus = &respr + w.beginGetFlowLogStatus.add(req, beginGetFlowLogStatus) + } + + resp, err := server.PollerResponderNext(beginGetFlowLogStatus, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + w.beginGetFlowLogStatus.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGetFlowLogStatus) { + w.beginGetFlowLogStatus.remove(req) + } + + return resp, nil +} + +func (w *WatchersServerTransport) dispatchBeginGetNetworkConfigurationDiagnostic(req *http.Request) (*http.Response, error) { + if w.srv.BeginGetNetworkConfigurationDiagnostic == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGetNetworkConfigurationDiagnostic not implemented")} + } + beginGetNetworkConfigurationDiagnostic := w.beginGetNetworkConfigurationDiagnostic.get(req) + if beginGetNetworkConfigurationDiagnostic == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/networkConfigurationDiagnostic` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.ConfigurationDiagnosticParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + respr, errRespr := w.srv.BeginGetNetworkConfigurationDiagnostic(req.Context(), resourceGroupNameParam, networkWatcherNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGetNetworkConfigurationDiagnostic = &respr + w.beginGetNetworkConfigurationDiagnostic.add(req, beginGetNetworkConfigurationDiagnostic) + } + + resp, err := server.PollerResponderNext(beginGetNetworkConfigurationDiagnostic, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + w.beginGetNetworkConfigurationDiagnostic.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGetNetworkConfigurationDiagnostic) { + w.beginGetNetworkConfigurationDiagnostic.remove(req) + } + + return resp, nil +} + +func (w *WatchersServerTransport) dispatchBeginGetNextHop(req *http.Request) (*http.Response, error) { + if w.srv.BeginGetNextHop == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGetNextHop not implemented")} + } + beginGetNextHop := w.beginGetNextHop.get(req) + if beginGetNextHop == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/nextHop` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.NextHopParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + respr, errRespr := w.srv.BeginGetNextHop(req.Context(), resourceGroupNameParam, networkWatcherNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGetNextHop = &respr + w.beginGetNextHop.add(req, beginGetNextHop) + } + + resp, err := server.PollerResponderNext(beginGetNextHop, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + w.beginGetNextHop.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGetNextHop) { + w.beginGetNextHop.remove(req) + } + + return resp, nil +} + +func (w *WatchersServerTransport) dispatchGetTopology(req *http.Request) (*http.Response, error) { + if w.srv.GetTopology == nil { + return nil, &nonRetriableError{errors.New("fake for method GetTopology not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/topology` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TopologyParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + respr, errRespr := w.srv.GetTopology(req.Context(), resourceGroupNameParam, networkWatcherNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Topology, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (w *WatchersServerTransport) dispatchBeginGetTroubleshooting(req *http.Request) (*http.Response, error) { + if w.srv.BeginGetTroubleshooting == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGetTroubleshooting not implemented")} + } + beginGetTroubleshooting := w.beginGetTroubleshooting.get(req) + if beginGetTroubleshooting == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/troubleshoot` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TroubleshootingParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + respr, errRespr := w.srv.BeginGetTroubleshooting(req.Context(), resourceGroupNameParam, networkWatcherNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGetTroubleshooting = &respr + w.beginGetTroubleshooting.add(req, beginGetTroubleshooting) + } + + resp, err := server.PollerResponderNext(beginGetTroubleshooting, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, 
resp.StatusCode) { + w.beginGetTroubleshooting.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGetTroubleshooting) { + w.beginGetTroubleshooting.remove(req) + } + + return resp, nil +} + +func (w *WatchersServerTransport) dispatchBeginGetTroubleshootingResult(req *http.Request) (*http.Response, error) { + if w.srv.BeginGetTroubleshootingResult == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGetTroubleshootingResult not implemented")} + } + beginGetTroubleshootingResult := w.beginGetTroubleshootingResult.get(req) + if beginGetTroubleshootingResult == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/queryTroubleshootResult` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.QueryTroubleshootingParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + respr, errRespr := w.srv.BeginGetTroubleshootingResult(req.Context(), resourceGroupNameParam, networkWatcherNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGetTroubleshootingResult = &respr + w.beginGetTroubleshootingResult.add(req, beginGetTroubleshootingResult) + } + + resp, err := server.PollerResponderNext(beginGetTroubleshootingResult, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + w.beginGetTroubleshootingResult.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGetTroubleshootingResult) { + w.beginGetTroubleshootingResult.remove(req) + } + + return resp, nil +} + +func (w *WatchersServerTransport) dispatchBeginGetVMSecurityRules(req *http.Request) (*http.Response, error) { + if w.srv.BeginGetVMSecurityRules == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginGetVMSecurityRules not implemented")} + } + beginGetVMSecurityRules := w.beginGetVMSecurityRules.get(req) + if beginGetVMSecurityRules == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/securityGroupView` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.SecurityGroupViewParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + respr, errRespr := w.srv.BeginGetVMSecurityRules(req.Context(), resourceGroupNameParam, networkWatcherNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginGetVMSecurityRules = &respr + w.beginGetVMSecurityRules.add(req, beginGetVMSecurityRules) + } + + resp, err := server.PollerResponderNext(beginGetVMSecurityRules, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + w.beginGetVMSecurityRules.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginGetVMSecurityRules) { + w.beginGetVMSecurityRules.remove(req) + } + + return resp, nil +} + +func (w *WatchersServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if w.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := w.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := w.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + w.newListPager.add(req, newListPager) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + w.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + w.newListPager.remove(req) + } + return resp, nil +} + +func (w *WatchersServerTransport) dispatchNewListAllPager(req *http.Request) (*http.Response, error) { + if w.srv.NewListAllPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAllPager not implemented")} + } + newListAllPager := w.newListAllPager.get(req) + if newListAllPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := w.srv.NewListAllPager(nil) + newListAllPager = &resp + w.newListAllPager.add(req, newListAllPager) + } + resp, err := server.PagerResponderNext(newListAllPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + w.newListAllPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAllPager) { + w.newListAllPager.remove(req) + } + return resp, nil +} + +func (w *WatchersServerTransport) dispatchBeginListAvailableProviders(req *http.Request) (*http.Response, error) { + if w.srv.BeginListAvailableProviders == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginListAvailableProviders not implemented")} + } + beginListAvailableProviders := w.beginListAvailableProviders.get(req) + if beginListAvailableProviders == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/availableProvidersList` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.AvailableProvidersListParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + respr, errRespr := w.srv.BeginListAvailableProviders(req.Context(), resourceGroupNameParam, networkWatcherNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginListAvailableProviders = &respr + w.beginListAvailableProviders.add(req, beginListAvailableProviders) + } + + resp, err := server.PollerResponderNext(beginListAvailableProviders, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + w.beginListAvailableProviders.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginListAvailableProviders) { + w.beginListAvailableProviders.remove(req) + } + + return resp, nil +} + +func (w *WatchersServerTransport) dispatchBeginSetFlowLogConfiguration(req *http.Request) (*http.Response, error) { + if w.srv.BeginSetFlowLogConfiguration == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginSetFlowLogConfiguration not implemented")} + } + beginSetFlowLogConfiguration := w.beginSetFlowLogConfiguration.get(req) + if beginSetFlowLogConfiguration == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/configureFlowLog` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.FlowLogInformation](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + respr, errRespr := w.srv.BeginSetFlowLogConfiguration(req.Context(), resourceGroupNameParam, networkWatcherNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginSetFlowLogConfiguration = &respr + w.beginSetFlowLogConfiguration.add(req, beginSetFlowLogConfiguration) + } + + resp, err := server.PollerResponderNext(beginSetFlowLogConfiguration, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + w.beginSetFlowLogConfiguration.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginSetFlowLogConfiguration) { + w.beginSetFlowLogConfiguration.remove(req) + } + + return resp, nil +} + +func (w *WatchersServerTransport) dispatchUpdateTags(req *http.Request) (*http.Response, error) { + if w.srv.UpdateTags == nil { + return nil, &nonRetriableError{errors.New("fake for method UpdateTags not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.TagsObject](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + respr, errRespr := w.srv.UpdateTags(req.Context(), resourceGroupNameParam, networkWatcherNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).Watcher, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (w *WatchersServerTransport) dispatchBeginVerifyIPFlow(req *http.Request) (*http.Response, error) { + if w.srv.BeginVerifyIPFlow == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginVerifyIPFlow not implemented")} + } + beginVerifyIPFlow := w.beginVerifyIPFlow.get(req) + if beginVerifyIPFlow == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/networkWatchers/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/ipFlowVerify` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.VerificationIPFlowParameters](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + networkWatcherNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("networkWatcherName")]) + if err != nil { + return nil, err + } + respr, errRespr := w.srv.BeginVerifyIPFlow(req.Context(), resourceGroupNameParam, networkWatcherNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginVerifyIPFlow = &respr + w.beginVerifyIPFlow.add(req, beginVerifyIPFlow) + } + + resp, err := server.PollerResponderNext(beginVerifyIPFlow, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + w.beginVerifyIPFlow.remove(req) + 
return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginVerifyIPFlow) { + w.beginVerifyIPFlow.remove(req) + } + + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/webapplicationfirewallpolicies_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/webapplicationfirewallpolicies_server.go new file mode 100644 index 00000000000..1264b2b45fc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/webapplicationfirewallpolicies_server.go @@ -0,0 +1,284 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// WebApplicationFirewallPoliciesServer is a fake server for instances of the armnetwork.WebApplicationFirewallPoliciesClient type. +type WebApplicationFirewallPoliciesServer struct { + // CreateOrUpdate is the fake for method WebApplicationFirewallPoliciesClient.CreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + CreateOrUpdate func(ctx context.Context, resourceGroupName string, policyName string, parameters armnetwork.WebApplicationFirewallPolicy, options *armnetwork.WebApplicationFirewallPoliciesClientCreateOrUpdateOptions) (resp azfake.Responder[armnetwork.WebApplicationFirewallPoliciesClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method WebApplicationFirewallPoliciesClient.BeginDelete + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, policyName string, options *armnetwork.WebApplicationFirewallPoliciesClientBeginDeleteOptions) (resp azfake.PollerResponder[armnetwork.WebApplicationFirewallPoliciesClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method WebApplicationFirewallPoliciesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, policyName string, options *armnetwork.WebApplicationFirewallPoliciesClientGetOptions) (resp azfake.Responder[armnetwork.WebApplicationFirewallPoliciesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListPager is the fake for method WebApplicationFirewallPoliciesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, options *armnetwork.WebApplicationFirewallPoliciesClientListOptions) (resp azfake.PagerResponder[armnetwork.WebApplicationFirewallPoliciesClientListResponse]) + + // NewListAllPager is the fake for method 
WebApplicationFirewallPoliciesClient.NewListAllPager + // HTTP status codes to indicate success: http.StatusOK + NewListAllPager func(options *armnetwork.WebApplicationFirewallPoliciesClientListAllOptions) (resp azfake.PagerResponder[armnetwork.WebApplicationFirewallPoliciesClientListAllResponse]) +} + +// NewWebApplicationFirewallPoliciesServerTransport creates a new instance of WebApplicationFirewallPoliciesServerTransport with the provided implementation. +// The returned WebApplicationFirewallPoliciesServerTransport instance is connected to an instance of armnetwork.WebApplicationFirewallPoliciesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewWebApplicationFirewallPoliciesServerTransport(srv *WebApplicationFirewallPoliciesServer) *WebApplicationFirewallPoliciesServerTransport { + return &WebApplicationFirewallPoliciesServerTransport{ + srv: srv, + beginDelete: newTracker[azfake.PollerResponder[armnetwork.WebApplicationFirewallPoliciesClientDeleteResponse]](), + newListPager: newTracker[azfake.PagerResponder[armnetwork.WebApplicationFirewallPoliciesClientListResponse]](), + newListAllPager: newTracker[azfake.PagerResponder[armnetwork.WebApplicationFirewallPoliciesClientListAllResponse]](), + } +} + +// WebApplicationFirewallPoliciesServerTransport connects instances of armnetwork.WebApplicationFirewallPoliciesClient to instances of WebApplicationFirewallPoliciesServer. +// Don't use this type directly, use NewWebApplicationFirewallPoliciesServerTransport instead. +type WebApplicationFirewallPoliciesServerTransport struct { + srv *WebApplicationFirewallPoliciesServer + beginDelete *tracker[azfake.PollerResponder[armnetwork.WebApplicationFirewallPoliciesClientDeleteResponse]] + newListPager *tracker[azfake.PagerResponder[armnetwork.WebApplicationFirewallPoliciesClientListResponse]] + newListAllPager *tracker[azfake.PagerResponder[armnetwork.WebApplicationFirewallPoliciesClientListAllResponse]] +} + +// Do implements the policy.Transporter interface for WebApplicationFirewallPoliciesServerTransport. 
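// A minimal usage sketch, not generated code: the constructor comment above describes wiring the
// fake via azcore client options, so a test could route a real armnetwork client through this
// transport roughly as below. It assumes the usual azcore packages (azfake = azcore/fake,
// arm = azcore/arm, azcore, net/http, context) plus this fake package; the subscription ID is a
// placeholder and BeginDelete's signature is the one declared on WebApplicationFirewallPoliciesServer.
//
//	wafSrv := fake.WebApplicationFirewallPoliciesServer{
//		// Fake BeginDelete as a long-running operation that terminates immediately with 200 OK.
//		BeginDelete: func(ctx context.Context, resourceGroupName string, policyName string,
//			options *armnetwork.WebApplicationFirewallPoliciesClientBeginDeleteOptions) (
//			resp azfake.PollerResponder[armnetwork.WebApplicationFirewallPoliciesClientDeleteResponse],
//			errResp azfake.ErrorResponder) {
//			resp.SetTerminalResponse(http.StatusOK, armnetwork.WebApplicationFirewallPoliciesClientDeleteResponse{}, nil)
//			return
//		},
//	}
//	// Point the client at the fake transport instead of the network.
//	client, err := armnetwork.NewWebApplicationFirewallPoliciesClient("<subscription-id>",
//		&azfake.TokenCredential{}, &arm.ClientOptions{
//			ClientOptions: azcore.ClientOptions{
//				Transport: fake.NewWebApplicationFirewallPoliciesServerTransport(&wafSrv),
//			},
//		})
//	_, _ = client, err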
+func (w *WebApplicationFirewallPoliciesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "WebApplicationFirewallPoliciesClient.CreateOrUpdate": + resp, err = w.dispatchCreateOrUpdate(req) + case "WebApplicationFirewallPoliciesClient.BeginDelete": + resp, err = w.dispatchBeginDelete(req) + case "WebApplicationFirewallPoliciesClient.Get": + resp, err = w.dispatchGet(req) + case "WebApplicationFirewallPoliciesClient.NewListPager": + resp, err = w.dispatchNewListPager(req) + case "WebApplicationFirewallPoliciesClient.NewListAllPager": + resp, err = w.dispatchNewListAllPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (w *WebApplicationFirewallPoliciesServerTransport) dispatchCreateOrUpdate(req *http.Request) (*http.Response, error) { + if w.srv.CreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method CreateOrUpdate not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ApplicationGatewayWebApplicationFirewallPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armnetwork.WebApplicationFirewallPolicy](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + policyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("policyName")]) + if err != nil { + return nil, err + } + respr, errRespr := w.srv.CreateOrUpdate(req.Context(), resourceGroupNameParam, policyNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK, http.StatusCreated}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).WebApplicationFirewallPolicy, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (w *WebApplicationFirewallPoliciesServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if w.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := w.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ApplicationGatewayWebApplicationFirewallPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + policyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("policyName")]) + if err != nil { + return nil, err + } + respr, errRespr := w.srv.BeginDelete(req.Context(), resourceGroupNameParam, policyNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + w.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + w.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + w.beginDelete.remove(req) + } + + return resp, nil +} + +func (w *WebApplicationFirewallPoliciesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if w.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ApplicationGatewayWebApplicationFirewallPolicies/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + policyNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("policyName")]) + if err != nil { + return nil, err + } + respr, errRespr := w.srv.Get(req.Context(), resourceGroupNameParam, policyNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).WebApplicationFirewallPolicy, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (w *WebApplicationFirewallPoliciesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if w.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := w.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ApplicationGatewayWebApplicationFirewallPolicies` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + resp := w.srv.NewListPager(resourceGroupNameParam, nil) + newListPager = &resp + w.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armnetwork.WebApplicationFirewallPoliciesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + w.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + w.newListPager.remove(req) + } + return resp, nil +} + +func (w *WebApplicationFirewallPoliciesServerTransport) dispatchNewListAllPager(req *http.Request) (*http.Response, error) { + if w.srv.NewListAllPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListAllPager not implemented")} + } + newListAllPager := w.newListAllPager.get(req) + if newListAllPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/ApplicationGatewayWebApplicationFirewallPolicies` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := w.srv.NewListAllPager(nil) + newListAllPager = &resp + w.newListAllPager.add(req, newListAllPager) + server.PagerResponderInjectNextLinks(newListAllPager, req, func(page *armnetwork.WebApplicationFirewallPoliciesClientListAllResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListAllPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + w.newListAllPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListAllPager) { + w.newListAllPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/webcategories_server.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/webcategories_server.go new file mode 100644 index 00000000000..4d5982d3c84 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake/webcategories_server.go @@ -0,0 +1,152 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "net/http" + "net/url" + "regexp" +) + +// WebCategoriesServer is a fake server for instances of the armnetwork.WebCategoriesClient type. +type WebCategoriesServer struct { + // Get is the fake for method WebCategoriesClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, name string, options *armnetwork.WebCategoriesClientGetOptions) (resp azfake.Responder[armnetwork.WebCategoriesClientGetResponse], errResp azfake.ErrorResponder) + + // NewListBySubscriptionPager is the fake for method WebCategoriesClient.NewListBySubscriptionPager + // HTTP status codes to indicate success: http.StatusOK + NewListBySubscriptionPager func(options *armnetwork.WebCategoriesClientListBySubscriptionOptions) (resp azfake.PagerResponder[armnetwork.WebCategoriesClientListBySubscriptionResponse]) +} + +// NewWebCategoriesServerTransport creates a new instance of WebCategoriesServerTransport with the provided implementation. +// The returned WebCategoriesServerTransport instance is connected to an instance of armnetwork.WebCategoriesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewWebCategoriesServerTransport(srv *WebCategoriesServer) *WebCategoriesServerTransport { + return &WebCategoriesServerTransport{ + srv: srv, + newListBySubscriptionPager: newTracker[azfake.PagerResponder[armnetwork.WebCategoriesClientListBySubscriptionResponse]](), + } +} + +// WebCategoriesServerTransport connects instances of armnetwork.WebCategoriesClient to instances of WebCategoriesServer. +// Don't use this type directly, use NewWebCategoriesServerTransport instead. +type WebCategoriesServerTransport struct { + srv *WebCategoriesServer + newListBySubscriptionPager *tracker[azfake.PagerResponder[armnetwork.WebCategoriesClientListBySubscriptionResponse]] +} + +// Do implements the policy.Transporter interface for WebCategoriesServerTransport. 
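// A minimal usage sketch, not generated code: the list fake above is a pager, so a test would queue
// pages on the azfake.PagerResponder and let the transport inject nextLink values between them (see
// PagerResponderInjectNextLinks below). The AzureWebCategoryListResult/AzureWebCategory model names
// are assumed from the armnetwork package and the category value is a placeholder.
//
//	catSrv := fake.WebCategoriesServer{
//		NewListBySubscriptionPager: func(options *armnetwork.WebCategoriesClientListBySubscriptionOptions) (
//			resp azfake.PagerResponder[armnetwork.WebCategoriesClientListBySubscriptionResponse]) {
//			// Each AddPage becomes one page returned to the client-side pager.
//			resp.AddPage(http.StatusOK, armnetwork.WebCategoriesClientListBySubscriptionResponse{
//				AzureWebCategoryListResult: armnetwork.AzureWebCategoryListResult{
//					Value: []*armnetwork.AzureWebCategory{{Name: to.Ptr("placeholder-category")}},
//				},
//			}, nil)
//			return
//		},
//	}
//	_ = fake.NewWebCategoriesServerTransport(&catSrv) // plug into azcore.ClientOptions.Transport as in the sketch above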
+func (w *WebCategoriesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "WebCategoriesClient.Get": + resp, err = w.dispatchGet(req) + case "WebCategoriesClient.NewListBySubscriptionPager": + resp, err = w.dispatchNewListBySubscriptionPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (w *WebCategoriesServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if w.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/azureWebCategories/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 2 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + qp := req.URL.Query() + nameParam, err := url.PathUnescape(matches[regex.SubexpIndex("name")]) + if err != nil { + return nil, err + } + expandUnescaped, err := url.QueryUnescape(qp.Get("$expand")) + if err != nil { + return nil, err + } + expandParam := getOptional(expandUnescaped) + var options *armnetwork.WebCategoriesClientGetOptions + if expandParam != nil { + options = &armnetwork.WebCategoriesClientGetOptions{ + Expand: expandParam, + } + } + respr, errRespr := w.srv.Get(req.Context(), nameParam, options) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).AzureWebCategory, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (w *WebCategoriesServerTransport) dispatchNewListBySubscriptionPager(req *http.Request) (*http.Response, error) { + if w.srv.NewListBySubscriptionPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListBySubscriptionPager not implemented")} + } + newListBySubscriptionPager := w.newListBySubscriptionPager.get(req) + if newListBySubscriptionPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.Network/azureWebCategories` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 1 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resp := w.srv.NewListBySubscriptionPager(nil) + newListBySubscriptionPager = &resp + w.newListBySubscriptionPager.add(req, newListBySubscriptionPager) + server.PagerResponderInjectNextLinks(newListBySubscriptionPager, req, func(page *armnetwork.WebCategoriesClientListBySubscriptionResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListBySubscriptionPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + w.newListBySubscriptionPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListBySubscriptionPager) { + w.newListBySubscriptionPager.remove(req) + } + return resp, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go new file mode 100644 index 00000000000..032a3d79d35 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go @@ -0,0 +1,3092 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package ec2iface provides an interface to enable mocking the Amazon Elastic Compute Cloud service client +// for testing your code. +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. +package ec2iface + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/ec2" +) + +// EC2API provides an interface to enable mocking the +// ec2.EC2 service client's API operation, +// paginators, and waiters. This make unit testing your code that calls out +// to the SDK's service client's calls easier. +// +// The best way to use this interface is so the SDK's service client's calls +// can be stubbed out for unit testing your code with the SDK without needing +// to inject custom request handlers into the SDK's request pipeline. +// +// // myFunc uses an SDK service client to make a request to +// // Amazon Elastic Compute Cloud. +// func myFunc(svc ec2iface.EC2API) bool { +// // Make svc.AcceptAddressTransfer request +// } +// +// func main() { +// sess := session.New() +// svc := ec2.New(sess) +// +// myFunc(svc) +// } +// +// In your _test.go file: +// +// // Define a mock struct to be used in your unit tests of myFunc. 
+// type mockEC2Client struct { +// ec2iface.EC2API +// } +// func (m *mockEC2Client) AcceptAddressTransfer(input *ec2.AcceptAddressTransferInput) (*ec2.AcceptAddressTransferOutput, error) { +// // mock response/functionality +// } +// +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockEC2Client{} +// +// myfunc(mockSvc) +// +// // Verify myFunc's functionality +// } +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. Its suggested to use the pattern above for testing, or using +// tooling to generate mocks to satisfy the interfaces. +type EC2API interface { + AcceptAddressTransfer(*ec2.AcceptAddressTransferInput) (*ec2.AcceptAddressTransferOutput, error) + AcceptAddressTransferWithContext(aws.Context, *ec2.AcceptAddressTransferInput, ...request.Option) (*ec2.AcceptAddressTransferOutput, error) + AcceptAddressTransferRequest(*ec2.AcceptAddressTransferInput) (*request.Request, *ec2.AcceptAddressTransferOutput) + + AcceptReservedInstancesExchangeQuote(*ec2.AcceptReservedInstancesExchangeQuoteInput) (*ec2.AcceptReservedInstancesExchangeQuoteOutput, error) + AcceptReservedInstancesExchangeQuoteWithContext(aws.Context, *ec2.AcceptReservedInstancesExchangeQuoteInput, ...request.Option) (*ec2.AcceptReservedInstancesExchangeQuoteOutput, error) + AcceptReservedInstancesExchangeQuoteRequest(*ec2.AcceptReservedInstancesExchangeQuoteInput) (*request.Request, *ec2.AcceptReservedInstancesExchangeQuoteOutput) + + AcceptTransitGatewayMulticastDomainAssociations(*ec2.AcceptTransitGatewayMulticastDomainAssociationsInput) (*ec2.AcceptTransitGatewayMulticastDomainAssociationsOutput, error) + AcceptTransitGatewayMulticastDomainAssociationsWithContext(aws.Context, *ec2.AcceptTransitGatewayMulticastDomainAssociationsInput, ...request.Option) (*ec2.AcceptTransitGatewayMulticastDomainAssociationsOutput, error) + AcceptTransitGatewayMulticastDomainAssociationsRequest(*ec2.AcceptTransitGatewayMulticastDomainAssociationsInput) (*request.Request, *ec2.AcceptTransitGatewayMulticastDomainAssociationsOutput) + + AcceptTransitGatewayPeeringAttachment(*ec2.AcceptTransitGatewayPeeringAttachmentInput) (*ec2.AcceptTransitGatewayPeeringAttachmentOutput, error) + AcceptTransitGatewayPeeringAttachmentWithContext(aws.Context, *ec2.AcceptTransitGatewayPeeringAttachmentInput, ...request.Option) (*ec2.AcceptTransitGatewayPeeringAttachmentOutput, error) + AcceptTransitGatewayPeeringAttachmentRequest(*ec2.AcceptTransitGatewayPeeringAttachmentInput) (*request.Request, *ec2.AcceptTransitGatewayPeeringAttachmentOutput) + + AcceptTransitGatewayVpcAttachment(*ec2.AcceptTransitGatewayVpcAttachmentInput) (*ec2.AcceptTransitGatewayVpcAttachmentOutput, error) + AcceptTransitGatewayVpcAttachmentWithContext(aws.Context, *ec2.AcceptTransitGatewayVpcAttachmentInput, ...request.Option) (*ec2.AcceptTransitGatewayVpcAttachmentOutput, error) + AcceptTransitGatewayVpcAttachmentRequest(*ec2.AcceptTransitGatewayVpcAttachmentInput) (*request.Request, *ec2.AcceptTransitGatewayVpcAttachmentOutput) + + AcceptVpcEndpointConnections(*ec2.AcceptVpcEndpointConnectionsInput) (*ec2.AcceptVpcEndpointConnectionsOutput, error) + AcceptVpcEndpointConnectionsWithContext(aws.Context, *ec2.AcceptVpcEndpointConnectionsInput, ...request.Option) (*ec2.AcceptVpcEndpointConnectionsOutput, error) + AcceptVpcEndpointConnectionsRequest(*ec2.AcceptVpcEndpointConnectionsInput) (*request.Request, *ec2.AcceptVpcEndpointConnectionsOutput) + 
+ AcceptVpcPeeringConnection(*ec2.AcceptVpcPeeringConnectionInput) (*ec2.AcceptVpcPeeringConnectionOutput, error) + AcceptVpcPeeringConnectionWithContext(aws.Context, *ec2.AcceptVpcPeeringConnectionInput, ...request.Option) (*ec2.AcceptVpcPeeringConnectionOutput, error) + AcceptVpcPeeringConnectionRequest(*ec2.AcceptVpcPeeringConnectionInput) (*request.Request, *ec2.AcceptVpcPeeringConnectionOutput) + + AdvertiseByoipCidr(*ec2.AdvertiseByoipCidrInput) (*ec2.AdvertiseByoipCidrOutput, error) + AdvertiseByoipCidrWithContext(aws.Context, *ec2.AdvertiseByoipCidrInput, ...request.Option) (*ec2.AdvertiseByoipCidrOutput, error) + AdvertiseByoipCidrRequest(*ec2.AdvertiseByoipCidrInput) (*request.Request, *ec2.AdvertiseByoipCidrOutput) + + AllocateAddress(*ec2.AllocateAddressInput) (*ec2.AllocateAddressOutput, error) + AllocateAddressWithContext(aws.Context, *ec2.AllocateAddressInput, ...request.Option) (*ec2.AllocateAddressOutput, error) + AllocateAddressRequest(*ec2.AllocateAddressInput) (*request.Request, *ec2.AllocateAddressOutput) + + AllocateHosts(*ec2.AllocateHostsInput) (*ec2.AllocateHostsOutput, error) + AllocateHostsWithContext(aws.Context, *ec2.AllocateHostsInput, ...request.Option) (*ec2.AllocateHostsOutput, error) + AllocateHostsRequest(*ec2.AllocateHostsInput) (*request.Request, *ec2.AllocateHostsOutput) + + AllocateIpamPoolCidr(*ec2.AllocateIpamPoolCidrInput) (*ec2.AllocateIpamPoolCidrOutput, error) + AllocateIpamPoolCidrWithContext(aws.Context, *ec2.AllocateIpamPoolCidrInput, ...request.Option) (*ec2.AllocateIpamPoolCidrOutput, error) + AllocateIpamPoolCidrRequest(*ec2.AllocateIpamPoolCidrInput) (*request.Request, *ec2.AllocateIpamPoolCidrOutput) + + ApplySecurityGroupsToClientVpnTargetNetwork(*ec2.ApplySecurityGroupsToClientVpnTargetNetworkInput) (*ec2.ApplySecurityGroupsToClientVpnTargetNetworkOutput, error) + ApplySecurityGroupsToClientVpnTargetNetworkWithContext(aws.Context, *ec2.ApplySecurityGroupsToClientVpnTargetNetworkInput, ...request.Option) (*ec2.ApplySecurityGroupsToClientVpnTargetNetworkOutput, error) + ApplySecurityGroupsToClientVpnTargetNetworkRequest(*ec2.ApplySecurityGroupsToClientVpnTargetNetworkInput) (*request.Request, *ec2.ApplySecurityGroupsToClientVpnTargetNetworkOutput) + + AssignIpv6Addresses(*ec2.AssignIpv6AddressesInput) (*ec2.AssignIpv6AddressesOutput, error) + AssignIpv6AddressesWithContext(aws.Context, *ec2.AssignIpv6AddressesInput, ...request.Option) (*ec2.AssignIpv6AddressesOutput, error) + AssignIpv6AddressesRequest(*ec2.AssignIpv6AddressesInput) (*request.Request, *ec2.AssignIpv6AddressesOutput) + + AssignPrivateIpAddresses(*ec2.AssignPrivateIpAddressesInput) (*ec2.AssignPrivateIpAddressesOutput, error) + AssignPrivateIpAddressesWithContext(aws.Context, *ec2.AssignPrivateIpAddressesInput, ...request.Option) (*ec2.AssignPrivateIpAddressesOutput, error) + AssignPrivateIpAddressesRequest(*ec2.AssignPrivateIpAddressesInput) (*request.Request, *ec2.AssignPrivateIpAddressesOutput) + + AssignPrivateNatGatewayAddress(*ec2.AssignPrivateNatGatewayAddressInput) (*ec2.AssignPrivateNatGatewayAddressOutput, error) + AssignPrivateNatGatewayAddressWithContext(aws.Context, *ec2.AssignPrivateNatGatewayAddressInput, ...request.Option) (*ec2.AssignPrivateNatGatewayAddressOutput, error) + AssignPrivateNatGatewayAddressRequest(*ec2.AssignPrivateNatGatewayAddressInput) (*request.Request, *ec2.AssignPrivateNatGatewayAddressOutput) + + AssociateAddress(*ec2.AssociateAddressInput) (*ec2.AssociateAddressOutput, error) + AssociateAddressWithContext(aws.Context, 
*ec2.AssociateAddressInput, ...request.Option) (*ec2.AssociateAddressOutput, error) + AssociateAddressRequest(*ec2.AssociateAddressInput) (*request.Request, *ec2.AssociateAddressOutput) + + AssociateClientVpnTargetNetwork(*ec2.AssociateClientVpnTargetNetworkInput) (*ec2.AssociateClientVpnTargetNetworkOutput, error) + AssociateClientVpnTargetNetworkWithContext(aws.Context, *ec2.AssociateClientVpnTargetNetworkInput, ...request.Option) (*ec2.AssociateClientVpnTargetNetworkOutput, error) + AssociateClientVpnTargetNetworkRequest(*ec2.AssociateClientVpnTargetNetworkInput) (*request.Request, *ec2.AssociateClientVpnTargetNetworkOutput) + + AssociateDhcpOptions(*ec2.AssociateDhcpOptionsInput) (*ec2.AssociateDhcpOptionsOutput, error) + AssociateDhcpOptionsWithContext(aws.Context, *ec2.AssociateDhcpOptionsInput, ...request.Option) (*ec2.AssociateDhcpOptionsOutput, error) + AssociateDhcpOptionsRequest(*ec2.AssociateDhcpOptionsInput) (*request.Request, *ec2.AssociateDhcpOptionsOutput) + + AssociateEnclaveCertificateIamRole(*ec2.AssociateEnclaveCertificateIamRoleInput) (*ec2.AssociateEnclaveCertificateIamRoleOutput, error) + AssociateEnclaveCertificateIamRoleWithContext(aws.Context, *ec2.AssociateEnclaveCertificateIamRoleInput, ...request.Option) (*ec2.AssociateEnclaveCertificateIamRoleOutput, error) + AssociateEnclaveCertificateIamRoleRequest(*ec2.AssociateEnclaveCertificateIamRoleInput) (*request.Request, *ec2.AssociateEnclaveCertificateIamRoleOutput) + + AssociateIamInstanceProfile(*ec2.AssociateIamInstanceProfileInput) (*ec2.AssociateIamInstanceProfileOutput, error) + AssociateIamInstanceProfileWithContext(aws.Context, *ec2.AssociateIamInstanceProfileInput, ...request.Option) (*ec2.AssociateIamInstanceProfileOutput, error) + AssociateIamInstanceProfileRequest(*ec2.AssociateIamInstanceProfileInput) (*request.Request, *ec2.AssociateIamInstanceProfileOutput) + + AssociateInstanceEventWindow(*ec2.AssociateInstanceEventWindowInput) (*ec2.AssociateInstanceEventWindowOutput, error) + AssociateInstanceEventWindowWithContext(aws.Context, *ec2.AssociateInstanceEventWindowInput, ...request.Option) (*ec2.AssociateInstanceEventWindowOutput, error) + AssociateInstanceEventWindowRequest(*ec2.AssociateInstanceEventWindowInput) (*request.Request, *ec2.AssociateInstanceEventWindowOutput) + + AssociateIpamByoasn(*ec2.AssociateIpamByoasnInput) (*ec2.AssociateIpamByoasnOutput, error) + AssociateIpamByoasnWithContext(aws.Context, *ec2.AssociateIpamByoasnInput, ...request.Option) (*ec2.AssociateIpamByoasnOutput, error) + AssociateIpamByoasnRequest(*ec2.AssociateIpamByoasnInput) (*request.Request, *ec2.AssociateIpamByoasnOutput) + + AssociateIpamResourceDiscovery(*ec2.AssociateIpamResourceDiscoveryInput) (*ec2.AssociateIpamResourceDiscoveryOutput, error) + AssociateIpamResourceDiscoveryWithContext(aws.Context, *ec2.AssociateIpamResourceDiscoveryInput, ...request.Option) (*ec2.AssociateIpamResourceDiscoveryOutput, error) + AssociateIpamResourceDiscoveryRequest(*ec2.AssociateIpamResourceDiscoveryInput) (*request.Request, *ec2.AssociateIpamResourceDiscoveryOutput) + + AssociateNatGatewayAddress(*ec2.AssociateNatGatewayAddressInput) (*ec2.AssociateNatGatewayAddressOutput, error) + AssociateNatGatewayAddressWithContext(aws.Context, *ec2.AssociateNatGatewayAddressInput, ...request.Option) (*ec2.AssociateNatGatewayAddressOutput, error) + AssociateNatGatewayAddressRequest(*ec2.AssociateNatGatewayAddressInput) (*request.Request, *ec2.AssociateNatGatewayAddressOutput) + + AssociateRouteTable(*ec2.AssociateRouteTableInput) 
(*ec2.AssociateRouteTableOutput, error) + AssociateRouteTableWithContext(aws.Context, *ec2.AssociateRouteTableInput, ...request.Option) (*ec2.AssociateRouteTableOutput, error) + AssociateRouteTableRequest(*ec2.AssociateRouteTableInput) (*request.Request, *ec2.AssociateRouteTableOutput) + + AssociateSubnetCidrBlock(*ec2.AssociateSubnetCidrBlockInput) (*ec2.AssociateSubnetCidrBlockOutput, error) + AssociateSubnetCidrBlockWithContext(aws.Context, *ec2.AssociateSubnetCidrBlockInput, ...request.Option) (*ec2.AssociateSubnetCidrBlockOutput, error) + AssociateSubnetCidrBlockRequest(*ec2.AssociateSubnetCidrBlockInput) (*request.Request, *ec2.AssociateSubnetCidrBlockOutput) + + AssociateTransitGatewayMulticastDomain(*ec2.AssociateTransitGatewayMulticastDomainInput) (*ec2.AssociateTransitGatewayMulticastDomainOutput, error) + AssociateTransitGatewayMulticastDomainWithContext(aws.Context, *ec2.AssociateTransitGatewayMulticastDomainInput, ...request.Option) (*ec2.AssociateTransitGatewayMulticastDomainOutput, error) + AssociateTransitGatewayMulticastDomainRequest(*ec2.AssociateTransitGatewayMulticastDomainInput) (*request.Request, *ec2.AssociateTransitGatewayMulticastDomainOutput) + + AssociateTransitGatewayPolicyTable(*ec2.AssociateTransitGatewayPolicyTableInput) (*ec2.AssociateTransitGatewayPolicyTableOutput, error) + AssociateTransitGatewayPolicyTableWithContext(aws.Context, *ec2.AssociateTransitGatewayPolicyTableInput, ...request.Option) (*ec2.AssociateTransitGatewayPolicyTableOutput, error) + AssociateTransitGatewayPolicyTableRequest(*ec2.AssociateTransitGatewayPolicyTableInput) (*request.Request, *ec2.AssociateTransitGatewayPolicyTableOutput) + + AssociateTransitGatewayRouteTable(*ec2.AssociateTransitGatewayRouteTableInput) (*ec2.AssociateTransitGatewayRouteTableOutput, error) + AssociateTransitGatewayRouteTableWithContext(aws.Context, *ec2.AssociateTransitGatewayRouteTableInput, ...request.Option) (*ec2.AssociateTransitGatewayRouteTableOutput, error) + AssociateTransitGatewayRouteTableRequest(*ec2.AssociateTransitGatewayRouteTableInput) (*request.Request, *ec2.AssociateTransitGatewayRouteTableOutput) + + AssociateTrunkInterface(*ec2.AssociateTrunkInterfaceInput) (*ec2.AssociateTrunkInterfaceOutput, error) + AssociateTrunkInterfaceWithContext(aws.Context, *ec2.AssociateTrunkInterfaceInput, ...request.Option) (*ec2.AssociateTrunkInterfaceOutput, error) + AssociateTrunkInterfaceRequest(*ec2.AssociateTrunkInterfaceInput) (*request.Request, *ec2.AssociateTrunkInterfaceOutput) + + AssociateVpcCidrBlock(*ec2.AssociateVpcCidrBlockInput) (*ec2.AssociateVpcCidrBlockOutput, error) + AssociateVpcCidrBlockWithContext(aws.Context, *ec2.AssociateVpcCidrBlockInput, ...request.Option) (*ec2.AssociateVpcCidrBlockOutput, error) + AssociateVpcCidrBlockRequest(*ec2.AssociateVpcCidrBlockInput) (*request.Request, *ec2.AssociateVpcCidrBlockOutput) + + AttachClassicLinkVpc(*ec2.AttachClassicLinkVpcInput) (*ec2.AttachClassicLinkVpcOutput, error) + AttachClassicLinkVpcWithContext(aws.Context, *ec2.AttachClassicLinkVpcInput, ...request.Option) (*ec2.AttachClassicLinkVpcOutput, error) + AttachClassicLinkVpcRequest(*ec2.AttachClassicLinkVpcInput) (*request.Request, *ec2.AttachClassicLinkVpcOutput) + + AttachInternetGateway(*ec2.AttachInternetGatewayInput) (*ec2.AttachInternetGatewayOutput, error) + AttachInternetGatewayWithContext(aws.Context, *ec2.AttachInternetGatewayInput, ...request.Option) (*ec2.AttachInternetGatewayOutput, error) + AttachInternetGatewayRequest(*ec2.AttachInternetGatewayInput) (*request.Request, 
*ec2.AttachInternetGatewayOutput) + + AttachNetworkInterface(*ec2.AttachNetworkInterfaceInput) (*ec2.AttachNetworkInterfaceOutput, error) + AttachNetworkInterfaceWithContext(aws.Context, *ec2.AttachNetworkInterfaceInput, ...request.Option) (*ec2.AttachNetworkInterfaceOutput, error) + AttachNetworkInterfaceRequest(*ec2.AttachNetworkInterfaceInput) (*request.Request, *ec2.AttachNetworkInterfaceOutput) + + AttachVerifiedAccessTrustProvider(*ec2.AttachVerifiedAccessTrustProviderInput) (*ec2.AttachVerifiedAccessTrustProviderOutput, error) + AttachVerifiedAccessTrustProviderWithContext(aws.Context, *ec2.AttachVerifiedAccessTrustProviderInput, ...request.Option) (*ec2.AttachVerifiedAccessTrustProviderOutput, error) + AttachVerifiedAccessTrustProviderRequest(*ec2.AttachVerifiedAccessTrustProviderInput) (*request.Request, *ec2.AttachVerifiedAccessTrustProviderOutput) + + AttachVolume(*ec2.AttachVolumeInput) (*ec2.VolumeAttachment, error) + AttachVolumeWithContext(aws.Context, *ec2.AttachVolumeInput, ...request.Option) (*ec2.VolumeAttachment, error) + AttachVolumeRequest(*ec2.AttachVolumeInput) (*request.Request, *ec2.VolumeAttachment) + + AttachVpnGateway(*ec2.AttachVpnGatewayInput) (*ec2.AttachVpnGatewayOutput, error) + AttachVpnGatewayWithContext(aws.Context, *ec2.AttachVpnGatewayInput, ...request.Option) (*ec2.AttachVpnGatewayOutput, error) + AttachVpnGatewayRequest(*ec2.AttachVpnGatewayInput) (*request.Request, *ec2.AttachVpnGatewayOutput) + + AuthorizeClientVpnIngress(*ec2.AuthorizeClientVpnIngressInput) (*ec2.AuthorizeClientVpnIngressOutput, error) + AuthorizeClientVpnIngressWithContext(aws.Context, *ec2.AuthorizeClientVpnIngressInput, ...request.Option) (*ec2.AuthorizeClientVpnIngressOutput, error) + AuthorizeClientVpnIngressRequest(*ec2.AuthorizeClientVpnIngressInput) (*request.Request, *ec2.AuthorizeClientVpnIngressOutput) + + AuthorizeSecurityGroupEgress(*ec2.AuthorizeSecurityGroupEgressInput) (*ec2.AuthorizeSecurityGroupEgressOutput, error) + AuthorizeSecurityGroupEgressWithContext(aws.Context, *ec2.AuthorizeSecurityGroupEgressInput, ...request.Option) (*ec2.AuthorizeSecurityGroupEgressOutput, error) + AuthorizeSecurityGroupEgressRequest(*ec2.AuthorizeSecurityGroupEgressInput) (*request.Request, *ec2.AuthorizeSecurityGroupEgressOutput) + + AuthorizeSecurityGroupIngress(*ec2.AuthorizeSecurityGroupIngressInput) (*ec2.AuthorizeSecurityGroupIngressOutput, error) + AuthorizeSecurityGroupIngressWithContext(aws.Context, *ec2.AuthorizeSecurityGroupIngressInput, ...request.Option) (*ec2.AuthorizeSecurityGroupIngressOutput, error) + AuthorizeSecurityGroupIngressRequest(*ec2.AuthorizeSecurityGroupIngressInput) (*request.Request, *ec2.AuthorizeSecurityGroupIngressOutput) + + BundleInstance(*ec2.BundleInstanceInput) (*ec2.BundleInstanceOutput, error) + BundleInstanceWithContext(aws.Context, *ec2.BundleInstanceInput, ...request.Option) (*ec2.BundleInstanceOutput, error) + BundleInstanceRequest(*ec2.BundleInstanceInput) (*request.Request, *ec2.BundleInstanceOutput) + + CancelBundleTask(*ec2.CancelBundleTaskInput) (*ec2.CancelBundleTaskOutput, error) + CancelBundleTaskWithContext(aws.Context, *ec2.CancelBundleTaskInput, ...request.Option) (*ec2.CancelBundleTaskOutput, error) + CancelBundleTaskRequest(*ec2.CancelBundleTaskInput) (*request.Request, *ec2.CancelBundleTaskOutput) + + CancelCapacityReservation(*ec2.CancelCapacityReservationInput) (*ec2.CancelCapacityReservationOutput, error) + CancelCapacityReservationWithContext(aws.Context, *ec2.CancelCapacityReservationInput, ...request.Option) 
(*ec2.CancelCapacityReservationOutput, error) + CancelCapacityReservationRequest(*ec2.CancelCapacityReservationInput) (*request.Request, *ec2.CancelCapacityReservationOutput) + + CancelCapacityReservationFleets(*ec2.CancelCapacityReservationFleetsInput) (*ec2.CancelCapacityReservationFleetsOutput, error) + CancelCapacityReservationFleetsWithContext(aws.Context, *ec2.CancelCapacityReservationFleetsInput, ...request.Option) (*ec2.CancelCapacityReservationFleetsOutput, error) + CancelCapacityReservationFleetsRequest(*ec2.CancelCapacityReservationFleetsInput) (*request.Request, *ec2.CancelCapacityReservationFleetsOutput) + + CancelConversionTask(*ec2.CancelConversionTaskInput) (*ec2.CancelConversionTaskOutput, error) + CancelConversionTaskWithContext(aws.Context, *ec2.CancelConversionTaskInput, ...request.Option) (*ec2.CancelConversionTaskOutput, error) + CancelConversionTaskRequest(*ec2.CancelConversionTaskInput) (*request.Request, *ec2.CancelConversionTaskOutput) + + CancelExportTask(*ec2.CancelExportTaskInput) (*ec2.CancelExportTaskOutput, error) + CancelExportTaskWithContext(aws.Context, *ec2.CancelExportTaskInput, ...request.Option) (*ec2.CancelExportTaskOutput, error) + CancelExportTaskRequest(*ec2.CancelExportTaskInput) (*request.Request, *ec2.CancelExportTaskOutput) + + CancelImageLaunchPermission(*ec2.CancelImageLaunchPermissionInput) (*ec2.CancelImageLaunchPermissionOutput, error) + CancelImageLaunchPermissionWithContext(aws.Context, *ec2.CancelImageLaunchPermissionInput, ...request.Option) (*ec2.CancelImageLaunchPermissionOutput, error) + CancelImageLaunchPermissionRequest(*ec2.CancelImageLaunchPermissionInput) (*request.Request, *ec2.CancelImageLaunchPermissionOutput) + + CancelImportTask(*ec2.CancelImportTaskInput) (*ec2.CancelImportTaskOutput, error) + CancelImportTaskWithContext(aws.Context, *ec2.CancelImportTaskInput, ...request.Option) (*ec2.CancelImportTaskOutput, error) + CancelImportTaskRequest(*ec2.CancelImportTaskInput) (*request.Request, *ec2.CancelImportTaskOutput) + + CancelReservedInstancesListing(*ec2.CancelReservedInstancesListingInput) (*ec2.CancelReservedInstancesListingOutput, error) + CancelReservedInstancesListingWithContext(aws.Context, *ec2.CancelReservedInstancesListingInput, ...request.Option) (*ec2.CancelReservedInstancesListingOutput, error) + CancelReservedInstancesListingRequest(*ec2.CancelReservedInstancesListingInput) (*request.Request, *ec2.CancelReservedInstancesListingOutput) + + CancelSpotFleetRequests(*ec2.CancelSpotFleetRequestsInput) (*ec2.CancelSpotFleetRequestsOutput, error) + CancelSpotFleetRequestsWithContext(aws.Context, *ec2.CancelSpotFleetRequestsInput, ...request.Option) (*ec2.CancelSpotFleetRequestsOutput, error) + CancelSpotFleetRequestsRequest(*ec2.CancelSpotFleetRequestsInput) (*request.Request, *ec2.CancelSpotFleetRequestsOutput) + + CancelSpotInstanceRequests(*ec2.CancelSpotInstanceRequestsInput) (*ec2.CancelSpotInstanceRequestsOutput, error) + CancelSpotInstanceRequestsWithContext(aws.Context, *ec2.CancelSpotInstanceRequestsInput, ...request.Option) (*ec2.CancelSpotInstanceRequestsOutput, error) + CancelSpotInstanceRequestsRequest(*ec2.CancelSpotInstanceRequestsInput) (*request.Request, *ec2.CancelSpotInstanceRequestsOutput) + + ConfirmProductInstance(*ec2.ConfirmProductInstanceInput) (*ec2.ConfirmProductInstanceOutput, error) + ConfirmProductInstanceWithContext(aws.Context, *ec2.ConfirmProductInstanceInput, ...request.Option) (*ec2.ConfirmProductInstanceOutput, error) + 
ConfirmProductInstanceRequest(*ec2.ConfirmProductInstanceInput) (*request.Request, *ec2.ConfirmProductInstanceOutput) + + CopyFpgaImage(*ec2.CopyFpgaImageInput) (*ec2.CopyFpgaImageOutput, error) + CopyFpgaImageWithContext(aws.Context, *ec2.CopyFpgaImageInput, ...request.Option) (*ec2.CopyFpgaImageOutput, error) + CopyFpgaImageRequest(*ec2.CopyFpgaImageInput) (*request.Request, *ec2.CopyFpgaImageOutput) + + CopyImage(*ec2.CopyImageInput) (*ec2.CopyImageOutput, error) + CopyImageWithContext(aws.Context, *ec2.CopyImageInput, ...request.Option) (*ec2.CopyImageOutput, error) + CopyImageRequest(*ec2.CopyImageInput) (*request.Request, *ec2.CopyImageOutput) + + CopySnapshot(*ec2.CopySnapshotInput) (*ec2.CopySnapshotOutput, error) + CopySnapshotWithContext(aws.Context, *ec2.CopySnapshotInput, ...request.Option) (*ec2.CopySnapshotOutput, error) + CopySnapshotRequest(*ec2.CopySnapshotInput) (*request.Request, *ec2.CopySnapshotOutput) + + CreateCapacityReservation(*ec2.CreateCapacityReservationInput) (*ec2.CreateCapacityReservationOutput, error) + CreateCapacityReservationWithContext(aws.Context, *ec2.CreateCapacityReservationInput, ...request.Option) (*ec2.CreateCapacityReservationOutput, error) + CreateCapacityReservationRequest(*ec2.CreateCapacityReservationInput) (*request.Request, *ec2.CreateCapacityReservationOutput) + + CreateCapacityReservationFleet(*ec2.CreateCapacityReservationFleetInput) (*ec2.CreateCapacityReservationFleetOutput, error) + CreateCapacityReservationFleetWithContext(aws.Context, *ec2.CreateCapacityReservationFleetInput, ...request.Option) (*ec2.CreateCapacityReservationFleetOutput, error) + CreateCapacityReservationFleetRequest(*ec2.CreateCapacityReservationFleetInput) (*request.Request, *ec2.CreateCapacityReservationFleetOutput) + + CreateCarrierGateway(*ec2.CreateCarrierGatewayInput) (*ec2.CreateCarrierGatewayOutput, error) + CreateCarrierGatewayWithContext(aws.Context, *ec2.CreateCarrierGatewayInput, ...request.Option) (*ec2.CreateCarrierGatewayOutput, error) + CreateCarrierGatewayRequest(*ec2.CreateCarrierGatewayInput) (*request.Request, *ec2.CreateCarrierGatewayOutput) + + CreateClientVpnEndpoint(*ec2.CreateClientVpnEndpointInput) (*ec2.CreateClientVpnEndpointOutput, error) + CreateClientVpnEndpointWithContext(aws.Context, *ec2.CreateClientVpnEndpointInput, ...request.Option) (*ec2.CreateClientVpnEndpointOutput, error) + CreateClientVpnEndpointRequest(*ec2.CreateClientVpnEndpointInput) (*request.Request, *ec2.CreateClientVpnEndpointOutput) + + CreateClientVpnRoute(*ec2.CreateClientVpnRouteInput) (*ec2.CreateClientVpnRouteOutput, error) + CreateClientVpnRouteWithContext(aws.Context, *ec2.CreateClientVpnRouteInput, ...request.Option) (*ec2.CreateClientVpnRouteOutput, error) + CreateClientVpnRouteRequest(*ec2.CreateClientVpnRouteInput) (*request.Request, *ec2.CreateClientVpnRouteOutput) + + CreateCoipCidr(*ec2.CreateCoipCidrInput) (*ec2.CreateCoipCidrOutput, error) + CreateCoipCidrWithContext(aws.Context, *ec2.CreateCoipCidrInput, ...request.Option) (*ec2.CreateCoipCidrOutput, error) + CreateCoipCidrRequest(*ec2.CreateCoipCidrInput) (*request.Request, *ec2.CreateCoipCidrOutput) + + CreateCoipPool(*ec2.CreateCoipPoolInput) (*ec2.CreateCoipPoolOutput, error) + CreateCoipPoolWithContext(aws.Context, *ec2.CreateCoipPoolInput, ...request.Option) (*ec2.CreateCoipPoolOutput, error) + CreateCoipPoolRequest(*ec2.CreateCoipPoolInput) (*request.Request, *ec2.CreateCoipPoolOutput) + + CreateCustomerGateway(*ec2.CreateCustomerGatewayInput) (*ec2.CreateCustomerGatewayOutput, error) + 
CreateCustomerGatewayWithContext(aws.Context, *ec2.CreateCustomerGatewayInput, ...request.Option) (*ec2.CreateCustomerGatewayOutput, error) + CreateCustomerGatewayRequest(*ec2.CreateCustomerGatewayInput) (*request.Request, *ec2.CreateCustomerGatewayOutput) + + CreateDefaultSubnet(*ec2.CreateDefaultSubnetInput) (*ec2.CreateDefaultSubnetOutput, error) + CreateDefaultSubnetWithContext(aws.Context, *ec2.CreateDefaultSubnetInput, ...request.Option) (*ec2.CreateDefaultSubnetOutput, error) + CreateDefaultSubnetRequest(*ec2.CreateDefaultSubnetInput) (*request.Request, *ec2.CreateDefaultSubnetOutput) + + CreateDefaultVpc(*ec2.CreateDefaultVpcInput) (*ec2.CreateDefaultVpcOutput, error) + CreateDefaultVpcWithContext(aws.Context, *ec2.CreateDefaultVpcInput, ...request.Option) (*ec2.CreateDefaultVpcOutput, error) + CreateDefaultVpcRequest(*ec2.CreateDefaultVpcInput) (*request.Request, *ec2.CreateDefaultVpcOutput) + + CreateDhcpOptions(*ec2.CreateDhcpOptionsInput) (*ec2.CreateDhcpOptionsOutput, error) + CreateDhcpOptionsWithContext(aws.Context, *ec2.CreateDhcpOptionsInput, ...request.Option) (*ec2.CreateDhcpOptionsOutput, error) + CreateDhcpOptionsRequest(*ec2.CreateDhcpOptionsInput) (*request.Request, *ec2.CreateDhcpOptionsOutput) + + CreateEgressOnlyInternetGateway(*ec2.CreateEgressOnlyInternetGatewayInput) (*ec2.CreateEgressOnlyInternetGatewayOutput, error) + CreateEgressOnlyInternetGatewayWithContext(aws.Context, *ec2.CreateEgressOnlyInternetGatewayInput, ...request.Option) (*ec2.CreateEgressOnlyInternetGatewayOutput, error) + CreateEgressOnlyInternetGatewayRequest(*ec2.CreateEgressOnlyInternetGatewayInput) (*request.Request, *ec2.CreateEgressOnlyInternetGatewayOutput) + + CreateFleet(*ec2.CreateFleetInput) (*ec2.CreateFleetOutput, error) + CreateFleetWithContext(aws.Context, *ec2.CreateFleetInput, ...request.Option) (*ec2.CreateFleetOutput, error) + CreateFleetRequest(*ec2.CreateFleetInput) (*request.Request, *ec2.CreateFleetOutput) + + CreateFlowLogs(*ec2.CreateFlowLogsInput) (*ec2.CreateFlowLogsOutput, error) + CreateFlowLogsWithContext(aws.Context, *ec2.CreateFlowLogsInput, ...request.Option) (*ec2.CreateFlowLogsOutput, error) + CreateFlowLogsRequest(*ec2.CreateFlowLogsInput) (*request.Request, *ec2.CreateFlowLogsOutput) + + CreateFpgaImage(*ec2.CreateFpgaImageInput) (*ec2.CreateFpgaImageOutput, error) + CreateFpgaImageWithContext(aws.Context, *ec2.CreateFpgaImageInput, ...request.Option) (*ec2.CreateFpgaImageOutput, error) + CreateFpgaImageRequest(*ec2.CreateFpgaImageInput) (*request.Request, *ec2.CreateFpgaImageOutput) + + CreateImage(*ec2.CreateImageInput) (*ec2.CreateImageOutput, error) + CreateImageWithContext(aws.Context, *ec2.CreateImageInput, ...request.Option) (*ec2.CreateImageOutput, error) + CreateImageRequest(*ec2.CreateImageInput) (*request.Request, *ec2.CreateImageOutput) + + CreateInstanceConnectEndpoint(*ec2.CreateInstanceConnectEndpointInput) (*ec2.CreateInstanceConnectEndpointOutput, error) + CreateInstanceConnectEndpointWithContext(aws.Context, *ec2.CreateInstanceConnectEndpointInput, ...request.Option) (*ec2.CreateInstanceConnectEndpointOutput, error) + CreateInstanceConnectEndpointRequest(*ec2.CreateInstanceConnectEndpointInput) (*request.Request, *ec2.CreateInstanceConnectEndpointOutput) + + CreateInstanceEventWindow(*ec2.CreateInstanceEventWindowInput) (*ec2.CreateInstanceEventWindowOutput, error) + CreateInstanceEventWindowWithContext(aws.Context, *ec2.CreateInstanceEventWindowInput, ...request.Option) (*ec2.CreateInstanceEventWindowOutput, error) + 
CreateInstanceEventWindowRequest(*ec2.CreateInstanceEventWindowInput) (*request.Request, *ec2.CreateInstanceEventWindowOutput) + + CreateInstanceExportTask(*ec2.CreateInstanceExportTaskInput) (*ec2.CreateInstanceExportTaskOutput, error) + CreateInstanceExportTaskWithContext(aws.Context, *ec2.CreateInstanceExportTaskInput, ...request.Option) (*ec2.CreateInstanceExportTaskOutput, error) + CreateInstanceExportTaskRequest(*ec2.CreateInstanceExportTaskInput) (*request.Request, *ec2.CreateInstanceExportTaskOutput) + + CreateInternetGateway(*ec2.CreateInternetGatewayInput) (*ec2.CreateInternetGatewayOutput, error) + CreateInternetGatewayWithContext(aws.Context, *ec2.CreateInternetGatewayInput, ...request.Option) (*ec2.CreateInternetGatewayOutput, error) + CreateInternetGatewayRequest(*ec2.CreateInternetGatewayInput) (*request.Request, *ec2.CreateInternetGatewayOutput) + + CreateIpam(*ec2.CreateIpamInput) (*ec2.CreateIpamOutput, error) + CreateIpamWithContext(aws.Context, *ec2.CreateIpamInput, ...request.Option) (*ec2.CreateIpamOutput, error) + CreateIpamRequest(*ec2.CreateIpamInput) (*request.Request, *ec2.CreateIpamOutput) + + CreateIpamExternalResourceVerificationToken(*ec2.CreateIpamExternalResourceVerificationTokenInput) (*ec2.CreateIpamExternalResourceVerificationTokenOutput, error) + CreateIpamExternalResourceVerificationTokenWithContext(aws.Context, *ec2.CreateIpamExternalResourceVerificationTokenInput, ...request.Option) (*ec2.CreateIpamExternalResourceVerificationTokenOutput, error) + CreateIpamExternalResourceVerificationTokenRequest(*ec2.CreateIpamExternalResourceVerificationTokenInput) (*request.Request, *ec2.CreateIpamExternalResourceVerificationTokenOutput) + + CreateIpamPool(*ec2.CreateIpamPoolInput) (*ec2.CreateIpamPoolOutput, error) + CreateIpamPoolWithContext(aws.Context, *ec2.CreateIpamPoolInput, ...request.Option) (*ec2.CreateIpamPoolOutput, error) + CreateIpamPoolRequest(*ec2.CreateIpamPoolInput) (*request.Request, *ec2.CreateIpamPoolOutput) + + CreateIpamResourceDiscovery(*ec2.CreateIpamResourceDiscoveryInput) (*ec2.CreateIpamResourceDiscoveryOutput, error) + CreateIpamResourceDiscoveryWithContext(aws.Context, *ec2.CreateIpamResourceDiscoveryInput, ...request.Option) (*ec2.CreateIpamResourceDiscoveryOutput, error) + CreateIpamResourceDiscoveryRequest(*ec2.CreateIpamResourceDiscoveryInput) (*request.Request, *ec2.CreateIpamResourceDiscoveryOutput) + + CreateIpamScope(*ec2.CreateIpamScopeInput) (*ec2.CreateIpamScopeOutput, error) + CreateIpamScopeWithContext(aws.Context, *ec2.CreateIpamScopeInput, ...request.Option) (*ec2.CreateIpamScopeOutput, error) + CreateIpamScopeRequest(*ec2.CreateIpamScopeInput) (*request.Request, *ec2.CreateIpamScopeOutput) + + CreateKeyPair(*ec2.CreateKeyPairInput) (*ec2.CreateKeyPairOutput, error) + CreateKeyPairWithContext(aws.Context, *ec2.CreateKeyPairInput, ...request.Option) (*ec2.CreateKeyPairOutput, error) + CreateKeyPairRequest(*ec2.CreateKeyPairInput) (*request.Request, *ec2.CreateKeyPairOutput) + + CreateLaunchTemplate(*ec2.CreateLaunchTemplateInput) (*ec2.CreateLaunchTemplateOutput, error) + CreateLaunchTemplateWithContext(aws.Context, *ec2.CreateLaunchTemplateInput, ...request.Option) (*ec2.CreateLaunchTemplateOutput, error) + CreateLaunchTemplateRequest(*ec2.CreateLaunchTemplateInput) (*request.Request, *ec2.CreateLaunchTemplateOutput) + + CreateLaunchTemplateVersion(*ec2.CreateLaunchTemplateVersionInput) (*ec2.CreateLaunchTemplateVersionOutput, error) + CreateLaunchTemplateVersionWithContext(aws.Context, 
*ec2.CreateLaunchTemplateVersionInput, ...request.Option) (*ec2.CreateLaunchTemplateVersionOutput, error) + CreateLaunchTemplateVersionRequest(*ec2.CreateLaunchTemplateVersionInput) (*request.Request, *ec2.CreateLaunchTemplateVersionOutput) + + CreateLocalGatewayRoute(*ec2.CreateLocalGatewayRouteInput) (*ec2.CreateLocalGatewayRouteOutput, error) + CreateLocalGatewayRouteWithContext(aws.Context, *ec2.CreateLocalGatewayRouteInput, ...request.Option) (*ec2.CreateLocalGatewayRouteOutput, error) + CreateLocalGatewayRouteRequest(*ec2.CreateLocalGatewayRouteInput) (*request.Request, *ec2.CreateLocalGatewayRouteOutput) + + CreateLocalGatewayRouteTable(*ec2.CreateLocalGatewayRouteTableInput) (*ec2.CreateLocalGatewayRouteTableOutput, error) + CreateLocalGatewayRouteTableWithContext(aws.Context, *ec2.CreateLocalGatewayRouteTableInput, ...request.Option) (*ec2.CreateLocalGatewayRouteTableOutput, error) + CreateLocalGatewayRouteTableRequest(*ec2.CreateLocalGatewayRouteTableInput) (*request.Request, *ec2.CreateLocalGatewayRouteTableOutput) + + CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociation(*ec2.CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationInput) (*ec2.CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationOutput, error) + CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationWithContext(aws.Context, *ec2.CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationInput, ...request.Option) (*ec2.CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationOutput, error) + CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationRequest(*ec2.CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationInput) (*request.Request, *ec2.CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationOutput) + + CreateLocalGatewayRouteTableVpcAssociation(*ec2.CreateLocalGatewayRouteTableVpcAssociationInput) (*ec2.CreateLocalGatewayRouteTableVpcAssociationOutput, error) + CreateLocalGatewayRouteTableVpcAssociationWithContext(aws.Context, *ec2.CreateLocalGatewayRouteTableVpcAssociationInput, ...request.Option) (*ec2.CreateLocalGatewayRouteTableVpcAssociationOutput, error) + CreateLocalGatewayRouteTableVpcAssociationRequest(*ec2.CreateLocalGatewayRouteTableVpcAssociationInput) (*request.Request, *ec2.CreateLocalGatewayRouteTableVpcAssociationOutput) + + CreateManagedPrefixList(*ec2.CreateManagedPrefixListInput) (*ec2.CreateManagedPrefixListOutput, error) + CreateManagedPrefixListWithContext(aws.Context, *ec2.CreateManagedPrefixListInput, ...request.Option) (*ec2.CreateManagedPrefixListOutput, error) + CreateManagedPrefixListRequest(*ec2.CreateManagedPrefixListInput) (*request.Request, *ec2.CreateManagedPrefixListOutput) + + CreateNatGateway(*ec2.CreateNatGatewayInput) (*ec2.CreateNatGatewayOutput, error) + CreateNatGatewayWithContext(aws.Context, *ec2.CreateNatGatewayInput, ...request.Option) (*ec2.CreateNatGatewayOutput, error) + CreateNatGatewayRequest(*ec2.CreateNatGatewayInput) (*request.Request, *ec2.CreateNatGatewayOutput) + + CreateNetworkAcl(*ec2.CreateNetworkAclInput) (*ec2.CreateNetworkAclOutput, error) + CreateNetworkAclWithContext(aws.Context, *ec2.CreateNetworkAclInput, ...request.Option) (*ec2.CreateNetworkAclOutput, error) + CreateNetworkAclRequest(*ec2.CreateNetworkAclInput) (*request.Request, *ec2.CreateNetworkAclOutput) + + CreateNetworkAclEntry(*ec2.CreateNetworkAclEntryInput) (*ec2.CreateNetworkAclEntryOutput, error) + CreateNetworkAclEntryWithContext(aws.Context, *ec2.CreateNetworkAclEntryInput, ...request.Option) (*ec2.CreateNetworkAclEntryOutput, error) + 
CreateNetworkAclEntryRequest(*ec2.CreateNetworkAclEntryInput) (*request.Request, *ec2.CreateNetworkAclEntryOutput) + + CreateNetworkInsightsAccessScope(*ec2.CreateNetworkInsightsAccessScopeInput) (*ec2.CreateNetworkInsightsAccessScopeOutput, error) + CreateNetworkInsightsAccessScopeWithContext(aws.Context, *ec2.CreateNetworkInsightsAccessScopeInput, ...request.Option) (*ec2.CreateNetworkInsightsAccessScopeOutput, error) + CreateNetworkInsightsAccessScopeRequest(*ec2.CreateNetworkInsightsAccessScopeInput) (*request.Request, *ec2.CreateNetworkInsightsAccessScopeOutput) + + CreateNetworkInsightsPath(*ec2.CreateNetworkInsightsPathInput) (*ec2.CreateNetworkInsightsPathOutput, error) + CreateNetworkInsightsPathWithContext(aws.Context, *ec2.CreateNetworkInsightsPathInput, ...request.Option) (*ec2.CreateNetworkInsightsPathOutput, error) + CreateNetworkInsightsPathRequest(*ec2.CreateNetworkInsightsPathInput) (*request.Request, *ec2.CreateNetworkInsightsPathOutput) + + CreateNetworkInterface(*ec2.CreateNetworkInterfaceInput) (*ec2.CreateNetworkInterfaceOutput, error) + CreateNetworkInterfaceWithContext(aws.Context, *ec2.CreateNetworkInterfaceInput, ...request.Option) (*ec2.CreateNetworkInterfaceOutput, error) + CreateNetworkInterfaceRequest(*ec2.CreateNetworkInterfaceInput) (*request.Request, *ec2.CreateNetworkInterfaceOutput) + + CreateNetworkInterfacePermission(*ec2.CreateNetworkInterfacePermissionInput) (*ec2.CreateNetworkInterfacePermissionOutput, error) + CreateNetworkInterfacePermissionWithContext(aws.Context, *ec2.CreateNetworkInterfacePermissionInput, ...request.Option) (*ec2.CreateNetworkInterfacePermissionOutput, error) + CreateNetworkInterfacePermissionRequest(*ec2.CreateNetworkInterfacePermissionInput) (*request.Request, *ec2.CreateNetworkInterfacePermissionOutput) + + CreatePlacementGroup(*ec2.CreatePlacementGroupInput) (*ec2.CreatePlacementGroupOutput, error) + CreatePlacementGroupWithContext(aws.Context, *ec2.CreatePlacementGroupInput, ...request.Option) (*ec2.CreatePlacementGroupOutput, error) + CreatePlacementGroupRequest(*ec2.CreatePlacementGroupInput) (*request.Request, *ec2.CreatePlacementGroupOutput) + + CreatePublicIpv4Pool(*ec2.CreatePublicIpv4PoolInput) (*ec2.CreatePublicIpv4PoolOutput, error) + CreatePublicIpv4PoolWithContext(aws.Context, *ec2.CreatePublicIpv4PoolInput, ...request.Option) (*ec2.CreatePublicIpv4PoolOutput, error) + CreatePublicIpv4PoolRequest(*ec2.CreatePublicIpv4PoolInput) (*request.Request, *ec2.CreatePublicIpv4PoolOutput) + + CreateReplaceRootVolumeTask(*ec2.CreateReplaceRootVolumeTaskInput) (*ec2.CreateReplaceRootVolumeTaskOutput, error) + CreateReplaceRootVolumeTaskWithContext(aws.Context, *ec2.CreateReplaceRootVolumeTaskInput, ...request.Option) (*ec2.CreateReplaceRootVolumeTaskOutput, error) + CreateReplaceRootVolumeTaskRequest(*ec2.CreateReplaceRootVolumeTaskInput) (*request.Request, *ec2.CreateReplaceRootVolumeTaskOutput) + + CreateReservedInstancesListing(*ec2.CreateReservedInstancesListingInput) (*ec2.CreateReservedInstancesListingOutput, error) + CreateReservedInstancesListingWithContext(aws.Context, *ec2.CreateReservedInstancesListingInput, ...request.Option) (*ec2.CreateReservedInstancesListingOutput, error) + CreateReservedInstancesListingRequest(*ec2.CreateReservedInstancesListingInput) (*request.Request, *ec2.CreateReservedInstancesListingOutput) + + CreateRestoreImageTask(*ec2.CreateRestoreImageTaskInput) (*ec2.CreateRestoreImageTaskOutput, error) + CreateRestoreImageTaskWithContext(aws.Context, *ec2.CreateRestoreImageTaskInput, 
...request.Option) (*ec2.CreateRestoreImageTaskOutput, error) + CreateRestoreImageTaskRequest(*ec2.CreateRestoreImageTaskInput) (*request.Request, *ec2.CreateRestoreImageTaskOutput) + + CreateRoute(*ec2.CreateRouteInput) (*ec2.CreateRouteOutput, error) + CreateRouteWithContext(aws.Context, *ec2.CreateRouteInput, ...request.Option) (*ec2.CreateRouteOutput, error) + CreateRouteRequest(*ec2.CreateRouteInput) (*request.Request, *ec2.CreateRouteOutput) + + CreateRouteTable(*ec2.CreateRouteTableInput) (*ec2.CreateRouteTableOutput, error) + CreateRouteTableWithContext(aws.Context, *ec2.CreateRouteTableInput, ...request.Option) (*ec2.CreateRouteTableOutput, error) + CreateRouteTableRequest(*ec2.CreateRouteTableInput) (*request.Request, *ec2.CreateRouteTableOutput) + + CreateSecurityGroup(*ec2.CreateSecurityGroupInput) (*ec2.CreateSecurityGroupOutput, error) + CreateSecurityGroupWithContext(aws.Context, *ec2.CreateSecurityGroupInput, ...request.Option) (*ec2.CreateSecurityGroupOutput, error) + CreateSecurityGroupRequest(*ec2.CreateSecurityGroupInput) (*request.Request, *ec2.CreateSecurityGroupOutput) + + CreateSnapshot(*ec2.CreateSnapshotInput) (*ec2.Snapshot, error) + CreateSnapshotWithContext(aws.Context, *ec2.CreateSnapshotInput, ...request.Option) (*ec2.Snapshot, error) + CreateSnapshotRequest(*ec2.CreateSnapshotInput) (*request.Request, *ec2.Snapshot) + + CreateSnapshots(*ec2.CreateSnapshotsInput) (*ec2.CreateSnapshotsOutput, error) + CreateSnapshotsWithContext(aws.Context, *ec2.CreateSnapshotsInput, ...request.Option) (*ec2.CreateSnapshotsOutput, error) + CreateSnapshotsRequest(*ec2.CreateSnapshotsInput) (*request.Request, *ec2.CreateSnapshotsOutput) + + CreateSpotDatafeedSubscription(*ec2.CreateSpotDatafeedSubscriptionInput) (*ec2.CreateSpotDatafeedSubscriptionOutput, error) + CreateSpotDatafeedSubscriptionWithContext(aws.Context, *ec2.CreateSpotDatafeedSubscriptionInput, ...request.Option) (*ec2.CreateSpotDatafeedSubscriptionOutput, error) + CreateSpotDatafeedSubscriptionRequest(*ec2.CreateSpotDatafeedSubscriptionInput) (*request.Request, *ec2.CreateSpotDatafeedSubscriptionOutput) + + CreateStoreImageTask(*ec2.CreateStoreImageTaskInput) (*ec2.CreateStoreImageTaskOutput, error) + CreateStoreImageTaskWithContext(aws.Context, *ec2.CreateStoreImageTaskInput, ...request.Option) (*ec2.CreateStoreImageTaskOutput, error) + CreateStoreImageTaskRequest(*ec2.CreateStoreImageTaskInput) (*request.Request, *ec2.CreateStoreImageTaskOutput) + + CreateSubnet(*ec2.CreateSubnetInput) (*ec2.CreateSubnetOutput, error) + CreateSubnetWithContext(aws.Context, *ec2.CreateSubnetInput, ...request.Option) (*ec2.CreateSubnetOutput, error) + CreateSubnetRequest(*ec2.CreateSubnetInput) (*request.Request, *ec2.CreateSubnetOutput) + + CreateSubnetCidrReservation(*ec2.CreateSubnetCidrReservationInput) (*ec2.CreateSubnetCidrReservationOutput, error) + CreateSubnetCidrReservationWithContext(aws.Context, *ec2.CreateSubnetCidrReservationInput, ...request.Option) (*ec2.CreateSubnetCidrReservationOutput, error) + CreateSubnetCidrReservationRequest(*ec2.CreateSubnetCidrReservationInput) (*request.Request, *ec2.CreateSubnetCidrReservationOutput) + + CreateTags(*ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error) + CreateTagsWithContext(aws.Context, *ec2.CreateTagsInput, ...request.Option) (*ec2.CreateTagsOutput, error) + CreateTagsRequest(*ec2.CreateTagsInput) (*request.Request, *ec2.CreateTagsOutput) + + CreateTrafficMirrorFilter(*ec2.CreateTrafficMirrorFilterInput) (*ec2.CreateTrafficMirrorFilterOutput, error) + 
CreateTrafficMirrorFilterWithContext(aws.Context, *ec2.CreateTrafficMirrorFilterInput, ...request.Option) (*ec2.CreateTrafficMirrorFilterOutput, error) + CreateTrafficMirrorFilterRequest(*ec2.CreateTrafficMirrorFilterInput) (*request.Request, *ec2.CreateTrafficMirrorFilterOutput) + + CreateTrafficMirrorFilterRule(*ec2.CreateTrafficMirrorFilterRuleInput) (*ec2.CreateTrafficMirrorFilterRuleOutput, error) + CreateTrafficMirrorFilterRuleWithContext(aws.Context, *ec2.CreateTrafficMirrorFilterRuleInput, ...request.Option) (*ec2.CreateTrafficMirrorFilterRuleOutput, error) + CreateTrafficMirrorFilterRuleRequest(*ec2.CreateTrafficMirrorFilterRuleInput) (*request.Request, *ec2.CreateTrafficMirrorFilterRuleOutput) + + CreateTrafficMirrorSession(*ec2.CreateTrafficMirrorSessionInput) (*ec2.CreateTrafficMirrorSessionOutput, error) + CreateTrafficMirrorSessionWithContext(aws.Context, *ec2.CreateTrafficMirrorSessionInput, ...request.Option) (*ec2.CreateTrafficMirrorSessionOutput, error) + CreateTrafficMirrorSessionRequest(*ec2.CreateTrafficMirrorSessionInput) (*request.Request, *ec2.CreateTrafficMirrorSessionOutput) + + CreateTrafficMirrorTarget(*ec2.CreateTrafficMirrorTargetInput) (*ec2.CreateTrafficMirrorTargetOutput, error) + CreateTrafficMirrorTargetWithContext(aws.Context, *ec2.CreateTrafficMirrorTargetInput, ...request.Option) (*ec2.CreateTrafficMirrorTargetOutput, error) + CreateTrafficMirrorTargetRequest(*ec2.CreateTrafficMirrorTargetInput) (*request.Request, *ec2.CreateTrafficMirrorTargetOutput) + + CreateTransitGateway(*ec2.CreateTransitGatewayInput) (*ec2.CreateTransitGatewayOutput, error) + CreateTransitGatewayWithContext(aws.Context, *ec2.CreateTransitGatewayInput, ...request.Option) (*ec2.CreateTransitGatewayOutput, error) + CreateTransitGatewayRequest(*ec2.CreateTransitGatewayInput) (*request.Request, *ec2.CreateTransitGatewayOutput) + + CreateTransitGatewayConnect(*ec2.CreateTransitGatewayConnectInput) (*ec2.CreateTransitGatewayConnectOutput, error) + CreateTransitGatewayConnectWithContext(aws.Context, *ec2.CreateTransitGatewayConnectInput, ...request.Option) (*ec2.CreateTransitGatewayConnectOutput, error) + CreateTransitGatewayConnectRequest(*ec2.CreateTransitGatewayConnectInput) (*request.Request, *ec2.CreateTransitGatewayConnectOutput) + + CreateTransitGatewayConnectPeer(*ec2.CreateTransitGatewayConnectPeerInput) (*ec2.CreateTransitGatewayConnectPeerOutput, error) + CreateTransitGatewayConnectPeerWithContext(aws.Context, *ec2.CreateTransitGatewayConnectPeerInput, ...request.Option) (*ec2.CreateTransitGatewayConnectPeerOutput, error) + CreateTransitGatewayConnectPeerRequest(*ec2.CreateTransitGatewayConnectPeerInput) (*request.Request, *ec2.CreateTransitGatewayConnectPeerOutput) + + CreateTransitGatewayMulticastDomain(*ec2.CreateTransitGatewayMulticastDomainInput) (*ec2.CreateTransitGatewayMulticastDomainOutput, error) + CreateTransitGatewayMulticastDomainWithContext(aws.Context, *ec2.CreateTransitGatewayMulticastDomainInput, ...request.Option) (*ec2.CreateTransitGatewayMulticastDomainOutput, error) + CreateTransitGatewayMulticastDomainRequest(*ec2.CreateTransitGatewayMulticastDomainInput) (*request.Request, *ec2.CreateTransitGatewayMulticastDomainOutput) + + CreateTransitGatewayPeeringAttachment(*ec2.CreateTransitGatewayPeeringAttachmentInput) (*ec2.CreateTransitGatewayPeeringAttachmentOutput, error) + CreateTransitGatewayPeeringAttachmentWithContext(aws.Context, *ec2.CreateTransitGatewayPeeringAttachmentInput, ...request.Option) (*ec2.CreateTransitGatewayPeeringAttachmentOutput, error) 
+ CreateTransitGatewayPeeringAttachmentRequest(*ec2.CreateTransitGatewayPeeringAttachmentInput) (*request.Request, *ec2.CreateTransitGatewayPeeringAttachmentOutput) + + CreateTransitGatewayPolicyTable(*ec2.CreateTransitGatewayPolicyTableInput) (*ec2.CreateTransitGatewayPolicyTableOutput, error) + CreateTransitGatewayPolicyTableWithContext(aws.Context, *ec2.CreateTransitGatewayPolicyTableInput, ...request.Option) (*ec2.CreateTransitGatewayPolicyTableOutput, error) + CreateTransitGatewayPolicyTableRequest(*ec2.CreateTransitGatewayPolicyTableInput) (*request.Request, *ec2.CreateTransitGatewayPolicyTableOutput) + + CreateTransitGatewayPrefixListReference(*ec2.CreateTransitGatewayPrefixListReferenceInput) (*ec2.CreateTransitGatewayPrefixListReferenceOutput, error) + CreateTransitGatewayPrefixListReferenceWithContext(aws.Context, *ec2.CreateTransitGatewayPrefixListReferenceInput, ...request.Option) (*ec2.CreateTransitGatewayPrefixListReferenceOutput, error) + CreateTransitGatewayPrefixListReferenceRequest(*ec2.CreateTransitGatewayPrefixListReferenceInput) (*request.Request, *ec2.CreateTransitGatewayPrefixListReferenceOutput) + + CreateTransitGatewayRoute(*ec2.CreateTransitGatewayRouteInput) (*ec2.CreateTransitGatewayRouteOutput, error) + CreateTransitGatewayRouteWithContext(aws.Context, *ec2.CreateTransitGatewayRouteInput, ...request.Option) (*ec2.CreateTransitGatewayRouteOutput, error) + CreateTransitGatewayRouteRequest(*ec2.CreateTransitGatewayRouteInput) (*request.Request, *ec2.CreateTransitGatewayRouteOutput) + + CreateTransitGatewayRouteTable(*ec2.CreateTransitGatewayRouteTableInput) (*ec2.CreateTransitGatewayRouteTableOutput, error) + CreateTransitGatewayRouteTableWithContext(aws.Context, *ec2.CreateTransitGatewayRouteTableInput, ...request.Option) (*ec2.CreateTransitGatewayRouteTableOutput, error) + CreateTransitGatewayRouteTableRequest(*ec2.CreateTransitGatewayRouteTableInput) (*request.Request, *ec2.CreateTransitGatewayRouteTableOutput) + + CreateTransitGatewayRouteTableAnnouncement(*ec2.CreateTransitGatewayRouteTableAnnouncementInput) (*ec2.CreateTransitGatewayRouteTableAnnouncementOutput, error) + CreateTransitGatewayRouteTableAnnouncementWithContext(aws.Context, *ec2.CreateTransitGatewayRouteTableAnnouncementInput, ...request.Option) (*ec2.CreateTransitGatewayRouteTableAnnouncementOutput, error) + CreateTransitGatewayRouteTableAnnouncementRequest(*ec2.CreateTransitGatewayRouteTableAnnouncementInput) (*request.Request, *ec2.CreateTransitGatewayRouteTableAnnouncementOutput) + + CreateTransitGatewayVpcAttachment(*ec2.CreateTransitGatewayVpcAttachmentInput) (*ec2.CreateTransitGatewayVpcAttachmentOutput, error) + CreateTransitGatewayVpcAttachmentWithContext(aws.Context, *ec2.CreateTransitGatewayVpcAttachmentInput, ...request.Option) (*ec2.CreateTransitGatewayVpcAttachmentOutput, error) + CreateTransitGatewayVpcAttachmentRequest(*ec2.CreateTransitGatewayVpcAttachmentInput) (*request.Request, *ec2.CreateTransitGatewayVpcAttachmentOutput) + + CreateVerifiedAccessEndpoint(*ec2.CreateVerifiedAccessEndpointInput) (*ec2.CreateVerifiedAccessEndpointOutput, error) + CreateVerifiedAccessEndpointWithContext(aws.Context, *ec2.CreateVerifiedAccessEndpointInput, ...request.Option) (*ec2.CreateVerifiedAccessEndpointOutput, error) + CreateVerifiedAccessEndpointRequest(*ec2.CreateVerifiedAccessEndpointInput) (*request.Request, *ec2.CreateVerifiedAccessEndpointOutput) + + CreateVerifiedAccessGroup(*ec2.CreateVerifiedAccessGroupInput) (*ec2.CreateVerifiedAccessGroupOutput, error) + 
CreateVerifiedAccessGroupWithContext(aws.Context, *ec2.CreateVerifiedAccessGroupInput, ...request.Option) (*ec2.CreateVerifiedAccessGroupOutput, error) + CreateVerifiedAccessGroupRequest(*ec2.CreateVerifiedAccessGroupInput) (*request.Request, *ec2.CreateVerifiedAccessGroupOutput) + + CreateVerifiedAccessInstance(*ec2.CreateVerifiedAccessInstanceInput) (*ec2.CreateVerifiedAccessInstanceOutput, error) + CreateVerifiedAccessInstanceWithContext(aws.Context, *ec2.CreateVerifiedAccessInstanceInput, ...request.Option) (*ec2.CreateVerifiedAccessInstanceOutput, error) + CreateVerifiedAccessInstanceRequest(*ec2.CreateVerifiedAccessInstanceInput) (*request.Request, *ec2.CreateVerifiedAccessInstanceOutput) + + CreateVerifiedAccessTrustProvider(*ec2.CreateVerifiedAccessTrustProviderInput) (*ec2.CreateVerifiedAccessTrustProviderOutput, error) + CreateVerifiedAccessTrustProviderWithContext(aws.Context, *ec2.CreateVerifiedAccessTrustProviderInput, ...request.Option) (*ec2.CreateVerifiedAccessTrustProviderOutput, error) + CreateVerifiedAccessTrustProviderRequest(*ec2.CreateVerifiedAccessTrustProviderInput) (*request.Request, *ec2.CreateVerifiedAccessTrustProviderOutput) + + CreateVolume(*ec2.CreateVolumeInput) (*ec2.Volume, error) + CreateVolumeWithContext(aws.Context, *ec2.CreateVolumeInput, ...request.Option) (*ec2.Volume, error) + CreateVolumeRequest(*ec2.CreateVolumeInput) (*request.Request, *ec2.Volume) + + CreateVpc(*ec2.CreateVpcInput) (*ec2.CreateVpcOutput, error) + CreateVpcWithContext(aws.Context, *ec2.CreateVpcInput, ...request.Option) (*ec2.CreateVpcOutput, error) + CreateVpcRequest(*ec2.CreateVpcInput) (*request.Request, *ec2.CreateVpcOutput) + + CreateVpcEndpoint(*ec2.CreateVpcEndpointInput) (*ec2.CreateVpcEndpointOutput, error) + CreateVpcEndpointWithContext(aws.Context, *ec2.CreateVpcEndpointInput, ...request.Option) (*ec2.CreateVpcEndpointOutput, error) + CreateVpcEndpointRequest(*ec2.CreateVpcEndpointInput) (*request.Request, *ec2.CreateVpcEndpointOutput) + + CreateVpcEndpointConnectionNotification(*ec2.CreateVpcEndpointConnectionNotificationInput) (*ec2.CreateVpcEndpointConnectionNotificationOutput, error) + CreateVpcEndpointConnectionNotificationWithContext(aws.Context, *ec2.CreateVpcEndpointConnectionNotificationInput, ...request.Option) (*ec2.CreateVpcEndpointConnectionNotificationOutput, error) + CreateVpcEndpointConnectionNotificationRequest(*ec2.CreateVpcEndpointConnectionNotificationInput) (*request.Request, *ec2.CreateVpcEndpointConnectionNotificationOutput) + + CreateVpcEndpointServiceConfiguration(*ec2.CreateVpcEndpointServiceConfigurationInput) (*ec2.CreateVpcEndpointServiceConfigurationOutput, error) + CreateVpcEndpointServiceConfigurationWithContext(aws.Context, *ec2.CreateVpcEndpointServiceConfigurationInput, ...request.Option) (*ec2.CreateVpcEndpointServiceConfigurationOutput, error) + CreateVpcEndpointServiceConfigurationRequest(*ec2.CreateVpcEndpointServiceConfigurationInput) (*request.Request, *ec2.CreateVpcEndpointServiceConfigurationOutput) + + CreateVpcPeeringConnection(*ec2.CreateVpcPeeringConnectionInput) (*ec2.CreateVpcPeeringConnectionOutput, error) + CreateVpcPeeringConnectionWithContext(aws.Context, *ec2.CreateVpcPeeringConnectionInput, ...request.Option) (*ec2.CreateVpcPeeringConnectionOutput, error) + CreateVpcPeeringConnectionRequest(*ec2.CreateVpcPeeringConnectionInput) (*request.Request, *ec2.CreateVpcPeeringConnectionOutput) + + CreateVpnConnection(*ec2.CreateVpnConnectionInput) (*ec2.CreateVpnConnectionOutput, error) + 
CreateVpnConnectionWithContext(aws.Context, *ec2.CreateVpnConnectionInput, ...request.Option) (*ec2.CreateVpnConnectionOutput, error) + CreateVpnConnectionRequest(*ec2.CreateVpnConnectionInput) (*request.Request, *ec2.CreateVpnConnectionOutput) + + CreateVpnConnectionRoute(*ec2.CreateVpnConnectionRouteInput) (*ec2.CreateVpnConnectionRouteOutput, error) + CreateVpnConnectionRouteWithContext(aws.Context, *ec2.CreateVpnConnectionRouteInput, ...request.Option) (*ec2.CreateVpnConnectionRouteOutput, error) + CreateVpnConnectionRouteRequest(*ec2.CreateVpnConnectionRouteInput) (*request.Request, *ec2.CreateVpnConnectionRouteOutput) + + CreateVpnGateway(*ec2.CreateVpnGatewayInput) (*ec2.CreateVpnGatewayOutput, error) + CreateVpnGatewayWithContext(aws.Context, *ec2.CreateVpnGatewayInput, ...request.Option) (*ec2.CreateVpnGatewayOutput, error) + CreateVpnGatewayRequest(*ec2.CreateVpnGatewayInput) (*request.Request, *ec2.CreateVpnGatewayOutput) + + DeleteCarrierGateway(*ec2.DeleteCarrierGatewayInput) (*ec2.DeleteCarrierGatewayOutput, error) + DeleteCarrierGatewayWithContext(aws.Context, *ec2.DeleteCarrierGatewayInput, ...request.Option) (*ec2.DeleteCarrierGatewayOutput, error) + DeleteCarrierGatewayRequest(*ec2.DeleteCarrierGatewayInput) (*request.Request, *ec2.DeleteCarrierGatewayOutput) + + DeleteClientVpnEndpoint(*ec2.DeleteClientVpnEndpointInput) (*ec2.DeleteClientVpnEndpointOutput, error) + DeleteClientVpnEndpointWithContext(aws.Context, *ec2.DeleteClientVpnEndpointInput, ...request.Option) (*ec2.DeleteClientVpnEndpointOutput, error) + DeleteClientVpnEndpointRequest(*ec2.DeleteClientVpnEndpointInput) (*request.Request, *ec2.DeleteClientVpnEndpointOutput) + + DeleteClientVpnRoute(*ec2.DeleteClientVpnRouteInput) (*ec2.DeleteClientVpnRouteOutput, error) + DeleteClientVpnRouteWithContext(aws.Context, *ec2.DeleteClientVpnRouteInput, ...request.Option) (*ec2.DeleteClientVpnRouteOutput, error) + DeleteClientVpnRouteRequest(*ec2.DeleteClientVpnRouteInput) (*request.Request, *ec2.DeleteClientVpnRouteOutput) + + DeleteCoipCidr(*ec2.DeleteCoipCidrInput) (*ec2.DeleteCoipCidrOutput, error) + DeleteCoipCidrWithContext(aws.Context, *ec2.DeleteCoipCidrInput, ...request.Option) (*ec2.DeleteCoipCidrOutput, error) + DeleteCoipCidrRequest(*ec2.DeleteCoipCidrInput) (*request.Request, *ec2.DeleteCoipCidrOutput) + + DeleteCoipPool(*ec2.DeleteCoipPoolInput) (*ec2.DeleteCoipPoolOutput, error) + DeleteCoipPoolWithContext(aws.Context, *ec2.DeleteCoipPoolInput, ...request.Option) (*ec2.DeleteCoipPoolOutput, error) + DeleteCoipPoolRequest(*ec2.DeleteCoipPoolInput) (*request.Request, *ec2.DeleteCoipPoolOutput) + + DeleteCustomerGateway(*ec2.DeleteCustomerGatewayInput) (*ec2.DeleteCustomerGatewayOutput, error) + DeleteCustomerGatewayWithContext(aws.Context, *ec2.DeleteCustomerGatewayInput, ...request.Option) (*ec2.DeleteCustomerGatewayOutput, error) + DeleteCustomerGatewayRequest(*ec2.DeleteCustomerGatewayInput) (*request.Request, *ec2.DeleteCustomerGatewayOutput) + + DeleteDhcpOptions(*ec2.DeleteDhcpOptionsInput) (*ec2.DeleteDhcpOptionsOutput, error) + DeleteDhcpOptionsWithContext(aws.Context, *ec2.DeleteDhcpOptionsInput, ...request.Option) (*ec2.DeleteDhcpOptionsOutput, error) + DeleteDhcpOptionsRequest(*ec2.DeleteDhcpOptionsInput) (*request.Request, *ec2.DeleteDhcpOptionsOutput) + + DeleteEgressOnlyInternetGateway(*ec2.DeleteEgressOnlyInternetGatewayInput) (*ec2.DeleteEgressOnlyInternetGatewayOutput, error) + DeleteEgressOnlyInternetGatewayWithContext(aws.Context, *ec2.DeleteEgressOnlyInternetGatewayInput, 
...request.Option) (*ec2.DeleteEgressOnlyInternetGatewayOutput, error) + DeleteEgressOnlyInternetGatewayRequest(*ec2.DeleteEgressOnlyInternetGatewayInput) (*request.Request, *ec2.DeleteEgressOnlyInternetGatewayOutput) + + DeleteFleets(*ec2.DeleteFleetsInput) (*ec2.DeleteFleetsOutput, error) + DeleteFleetsWithContext(aws.Context, *ec2.DeleteFleetsInput, ...request.Option) (*ec2.DeleteFleetsOutput, error) + DeleteFleetsRequest(*ec2.DeleteFleetsInput) (*request.Request, *ec2.DeleteFleetsOutput) + + DeleteFlowLogs(*ec2.DeleteFlowLogsInput) (*ec2.DeleteFlowLogsOutput, error) + DeleteFlowLogsWithContext(aws.Context, *ec2.DeleteFlowLogsInput, ...request.Option) (*ec2.DeleteFlowLogsOutput, error) + DeleteFlowLogsRequest(*ec2.DeleteFlowLogsInput) (*request.Request, *ec2.DeleteFlowLogsOutput) + + DeleteFpgaImage(*ec2.DeleteFpgaImageInput) (*ec2.DeleteFpgaImageOutput, error) + DeleteFpgaImageWithContext(aws.Context, *ec2.DeleteFpgaImageInput, ...request.Option) (*ec2.DeleteFpgaImageOutput, error) + DeleteFpgaImageRequest(*ec2.DeleteFpgaImageInput) (*request.Request, *ec2.DeleteFpgaImageOutput) + + DeleteInstanceConnectEndpoint(*ec2.DeleteInstanceConnectEndpointInput) (*ec2.DeleteInstanceConnectEndpointOutput, error) + DeleteInstanceConnectEndpointWithContext(aws.Context, *ec2.DeleteInstanceConnectEndpointInput, ...request.Option) (*ec2.DeleteInstanceConnectEndpointOutput, error) + DeleteInstanceConnectEndpointRequest(*ec2.DeleteInstanceConnectEndpointInput) (*request.Request, *ec2.DeleteInstanceConnectEndpointOutput) + + DeleteInstanceEventWindow(*ec2.DeleteInstanceEventWindowInput) (*ec2.DeleteInstanceEventWindowOutput, error) + DeleteInstanceEventWindowWithContext(aws.Context, *ec2.DeleteInstanceEventWindowInput, ...request.Option) (*ec2.DeleteInstanceEventWindowOutput, error) + DeleteInstanceEventWindowRequest(*ec2.DeleteInstanceEventWindowInput) (*request.Request, *ec2.DeleteInstanceEventWindowOutput) + + DeleteInternetGateway(*ec2.DeleteInternetGatewayInput) (*ec2.DeleteInternetGatewayOutput, error) + DeleteInternetGatewayWithContext(aws.Context, *ec2.DeleteInternetGatewayInput, ...request.Option) (*ec2.DeleteInternetGatewayOutput, error) + DeleteInternetGatewayRequest(*ec2.DeleteInternetGatewayInput) (*request.Request, *ec2.DeleteInternetGatewayOutput) + + DeleteIpam(*ec2.DeleteIpamInput) (*ec2.DeleteIpamOutput, error) + DeleteIpamWithContext(aws.Context, *ec2.DeleteIpamInput, ...request.Option) (*ec2.DeleteIpamOutput, error) + DeleteIpamRequest(*ec2.DeleteIpamInput) (*request.Request, *ec2.DeleteIpamOutput) + + DeleteIpamExternalResourceVerificationToken(*ec2.DeleteIpamExternalResourceVerificationTokenInput) (*ec2.DeleteIpamExternalResourceVerificationTokenOutput, error) + DeleteIpamExternalResourceVerificationTokenWithContext(aws.Context, *ec2.DeleteIpamExternalResourceVerificationTokenInput, ...request.Option) (*ec2.DeleteIpamExternalResourceVerificationTokenOutput, error) + DeleteIpamExternalResourceVerificationTokenRequest(*ec2.DeleteIpamExternalResourceVerificationTokenInput) (*request.Request, *ec2.DeleteIpamExternalResourceVerificationTokenOutput) + + DeleteIpamPool(*ec2.DeleteIpamPoolInput) (*ec2.DeleteIpamPoolOutput, error) + DeleteIpamPoolWithContext(aws.Context, *ec2.DeleteIpamPoolInput, ...request.Option) (*ec2.DeleteIpamPoolOutput, error) + DeleteIpamPoolRequest(*ec2.DeleteIpamPoolInput) (*request.Request, *ec2.DeleteIpamPoolOutput) + + DeleteIpamResourceDiscovery(*ec2.DeleteIpamResourceDiscoveryInput) (*ec2.DeleteIpamResourceDiscoveryOutput, error) + 
DeleteIpamResourceDiscoveryWithContext(aws.Context, *ec2.DeleteIpamResourceDiscoveryInput, ...request.Option) (*ec2.DeleteIpamResourceDiscoveryOutput, error) + DeleteIpamResourceDiscoveryRequest(*ec2.DeleteIpamResourceDiscoveryInput) (*request.Request, *ec2.DeleteIpamResourceDiscoveryOutput) + + DeleteIpamScope(*ec2.DeleteIpamScopeInput) (*ec2.DeleteIpamScopeOutput, error) + DeleteIpamScopeWithContext(aws.Context, *ec2.DeleteIpamScopeInput, ...request.Option) (*ec2.DeleteIpamScopeOutput, error) + DeleteIpamScopeRequest(*ec2.DeleteIpamScopeInput) (*request.Request, *ec2.DeleteIpamScopeOutput) + + DeleteKeyPair(*ec2.DeleteKeyPairInput) (*ec2.DeleteKeyPairOutput, error) + DeleteKeyPairWithContext(aws.Context, *ec2.DeleteKeyPairInput, ...request.Option) (*ec2.DeleteKeyPairOutput, error) + DeleteKeyPairRequest(*ec2.DeleteKeyPairInput) (*request.Request, *ec2.DeleteKeyPairOutput) + + DeleteLaunchTemplate(*ec2.DeleteLaunchTemplateInput) (*ec2.DeleteLaunchTemplateOutput, error) + DeleteLaunchTemplateWithContext(aws.Context, *ec2.DeleteLaunchTemplateInput, ...request.Option) (*ec2.DeleteLaunchTemplateOutput, error) + DeleteLaunchTemplateRequest(*ec2.DeleteLaunchTemplateInput) (*request.Request, *ec2.DeleteLaunchTemplateOutput) + + DeleteLaunchTemplateVersions(*ec2.DeleteLaunchTemplateVersionsInput) (*ec2.DeleteLaunchTemplateVersionsOutput, error) + DeleteLaunchTemplateVersionsWithContext(aws.Context, *ec2.DeleteLaunchTemplateVersionsInput, ...request.Option) (*ec2.DeleteLaunchTemplateVersionsOutput, error) + DeleteLaunchTemplateVersionsRequest(*ec2.DeleteLaunchTemplateVersionsInput) (*request.Request, *ec2.DeleteLaunchTemplateVersionsOutput) + + DeleteLocalGatewayRoute(*ec2.DeleteLocalGatewayRouteInput) (*ec2.DeleteLocalGatewayRouteOutput, error) + DeleteLocalGatewayRouteWithContext(aws.Context, *ec2.DeleteLocalGatewayRouteInput, ...request.Option) (*ec2.DeleteLocalGatewayRouteOutput, error) + DeleteLocalGatewayRouteRequest(*ec2.DeleteLocalGatewayRouteInput) (*request.Request, *ec2.DeleteLocalGatewayRouteOutput) + + DeleteLocalGatewayRouteTable(*ec2.DeleteLocalGatewayRouteTableInput) (*ec2.DeleteLocalGatewayRouteTableOutput, error) + DeleteLocalGatewayRouteTableWithContext(aws.Context, *ec2.DeleteLocalGatewayRouteTableInput, ...request.Option) (*ec2.DeleteLocalGatewayRouteTableOutput, error) + DeleteLocalGatewayRouteTableRequest(*ec2.DeleteLocalGatewayRouteTableInput) (*request.Request, *ec2.DeleteLocalGatewayRouteTableOutput) + + DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociation(*ec2.DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationInput) (*ec2.DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationOutput, error) + DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationWithContext(aws.Context, *ec2.DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationInput, ...request.Option) (*ec2.DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationOutput, error) + DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationRequest(*ec2.DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationInput) (*request.Request, *ec2.DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationOutput) + + DeleteLocalGatewayRouteTableVpcAssociation(*ec2.DeleteLocalGatewayRouteTableVpcAssociationInput) (*ec2.DeleteLocalGatewayRouteTableVpcAssociationOutput, error) + DeleteLocalGatewayRouteTableVpcAssociationWithContext(aws.Context, *ec2.DeleteLocalGatewayRouteTableVpcAssociationInput, ...request.Option) (*ec2.DeleteLocalGatewayRouteTableVpcAssociationOutput, error) + 
DeleteLocalGatewayRouteTableVpcAssociationRequest(*ec2.DeleteLocalGatewayRouteTableVpcAssociationInput) (*request.Request, *ec2.DeleteLocalGatewayRouteTableVpcAssociationOutput) + + DeleteManagedPrefixList(*ec2.DeleteManagedPrefixListInput) (*ec2.DeleteManagedPrefixListOutput, error) + DeleteManagedPrefixListWithContext(aws.Context, *ec2.DeleteManagedPrefixListInput, ...request.Option) (*ec2.DeleteManagedPrefixListOutput, error) + DeleteManagedPrefixListRequest(*ec2.DeleteManagedPrefixListInput) (*request.Request, *ec2.DeleteManagedPrefixListOutput) + + DeleteNatGateway(*ec2.DeleteNatGatewayInput) (*ec2.DeleteNatGatewayOutput, error) + DeleteNatGatewayWithContext(aws.Context, *ec2.DeleteNatGatewayInput, ...request.Option) (*ec2.DeleteNatGatewayOutput, error) + DeleteNatGatewayRequest(*ec2.DeleteNatGatewayInput) (*request.Request, *ec2.DeleteNatGatewayOutput) + + DeleteNetworkAcl(*ec2.DeleteNetworkAclInput) (*ec2.DeleteNetworkAclOutput, error) + DeleteNetworkAclWithContext(aws.Context, *ec2.DeleteNetworkAclInput, ...request.Option) (*ec2.DeleteNetworkAclOutput, error) + DeleteNetworkAclRequest(*ec2.DeleteNetworkAclInput) (*request.Request, *ec2.DeleteNetworkAclOutput) + + DeleteNetworkAclEntry(*ec2.DeleteNetworkAclEntryInput) (*ec2.DeleteNetworkAclEntryOutput, error) + DeleteNetworkAclEntryWithContext(aws.Context, *ec2.DeleteNetworkAclEntryInput, ...request.Option) (*ec2.DeleteNetworkAclEntryOutput, error) + DeleteNetworkAclEntryRequest(*ec2.DeleteNetworkAclEntryInput) (*request.Request, *ec2.DeleteNetworkAclEntryOutput) + + DeleteNetworkInsightsAccessScope(*ec2.DeleteNetworkInsightsAccessScopeInput) (*ec2.DeleteNetworkInsightsAccessScopeOutput, error) + DeleteNetworkInsightsAccessScopeWithContext(aws.Context, *ec2.DeleteNetworkInsightsAccessScopeInput, ...request.Option) (*ec2.DeleteNetworkInsightsAccessScopeOutput, error) + DeleteNetworkInsightsAccessScopeRequest(*ec2.DeleteNetworkInsightsAccessScopeInput) (*request.Request, *ec2.DeleteNetworkInsightsAccessScopeOutput) + + DeleteNetworkInsightsAccessScopeAnalysis(*ec2.DeleteNetworkInsightsAccessScopeAnalysisInput) (*ec2.DeleteNetworkInsightsAccessScopeAnalysisOutput, error) + DeleteNetworkInsightsAccessScopeAnalysisWithContext(aws.Context, *ec2.DeleteNetworkInsightsAccessScopeAnalysisInput, ...request.Option) (*ec2.DeleteNetworkInsightsAccessScopeAnalysisOutput, error) + DeleteNetworkInsightsAccessScopeAnalysisRequest(*ec2.DeleteNetworkInsightsAccessScopeAnalysisInput) (*request.Request, *ec2.DeleteNetworkInsightsAccessScopeAnalysisOutput) + + DeleteNetworkInsightsAnalysis(*ec2.DeleteNetworkInsightsAnalysisInput) (*ec2.DeleteNetworkInsightsAnalysisOutput, error) + DeleteNetworkInsightsAnalysisWithContext(aws.Context, *ec2.DeleteNetworkInsightsAnalysisInput, ...request.Option) (*ec2.DeleteNetworkInsightsAnalysisOutput, error) + DeleteNetworkInsightsAnalysisRequest(*ec2.DeleteNetworkInsightsAnalysisInput) (*request.Request, *ec2.DeleteNetworkInsightsAnalysisOutput) + + DeleteNetworkInsightsPath(*ec2.DeleteNetworkInsightsPathInput) (*ec2.DeleteNetworkInsightsPathOutput, error) + DeleteNetworkInsightsPathWithContext(aws.Context, *ec2.DeleteNetworkInsightsPathInput, ...request.Option) (*ec2.DeleteNetworkInsightsPathOutput, error) + DeleteNetworkInsightsPathRequest(*ec2.DeleteNetworkInsightsPathInput) (*request.Request, *ec2.DeleteNetworkInsightsPathOutput) + + DeleteNetworkInterface(*ec2.DeleteNetworkInterfaceInput) (*ec2.DeleteNetworkInterfaceOutput, error) + DeleteNetworkInterfaceWithContext(aws.Context, *ec2.DeleteNetworkInterfaceInput, 
...request.Option) (*ec2.DeleteNetworkInterfaceOutput, error) + DeleteNetworkInterfaceRequest(*ec2.DeleteNetworkInterfaceInput) (*request.Request, *ec2.DeleteNetworkInterfaceOutput) + + DeleteNetworkInterfacePermission(*ec2.DeleteNetworkInterfacePermissionInput) (*ec2.DeleteNetworkInterfacePermissionOutput, error) + DeleteNetworkInterfacePermissionWithContext(aws.Context, *ec2.DeleteNetworkInterfacePermissionInput, ...request.Option) (*ec2.DeleteNetworkInterfacePermissionOutput, error) + DeleteNetworkInterfacePermissionRequest(*ec2.DeleteNetworkInterfacePermissionInput) (*request.Request, *ec2.DeleteNetworkInterfacePermissionOutput) + + DeletePlacementGroup(*ec2.DeletePlacementGroupInput) (*ec2.DeletePlacementGroupOutput, error) + DeletePlacementGroupWithContext(aws.Context, *ec2.DeletePlacementGroupInput, ...request.Option) (*ec2.DeletePlacementGroupOutput, error) + DeletePlacementGroupRequest(*ec2.DeletePlacementGroupInput) (*request.Request, *ec2.DeletePlacementGroupOutput) + + DeletePublicIpv4Pool(*ec2.DeletePublicIpv4PoolInput) (*ec2.DeletePublicIpv4PoolOutput, error) + DeletePublicIpv4PoolWithContext(aws.Context, *ec2.DeletePublicIpv4PoolInput, ...request.Option) (*ec2.DeletePublicIpv4PoolOutput, error) + DeletePublicIpv4PoolRequest(*ec2.DeletePublicIpv4PoolInput) (*request.Request, *ec2.DeletePublicIpv4PoolOutput) + + DeleteQueuedReservedInstances(*ec2.DeleteQueuedReservedInstancesInput) (*ec2.DeleteQueuedReservedInstancesOutput, error) + DeleteQueuedReservedInstancesWithContext(aws.Context, *ec2.DeleteQueuedReservedInstancesInput, ...request.Option) (*ec2.DeleteQueuedReservedInstancesOutput, error) + DeleteQueuedReservedInstancesRequest(*ec2.DeleteQueuedReservedInstancesInput) (*request.Request, *ec2.DeleteQueuedReservedInstancesOutput) + + DeleteRoute(*ec2.DeleteRouteInput) (*ec2.DeleteRouteOutput, error) + DeleteRouteWithContext(aws.Context, *ec2.DeleteRouteInput, ...request.Option) (*ec2.DeleteRouteOutput, error) + DeleteRouteRequest(*ec2.DeleteRouteInput) (*request.Request, *ec2.DeleteRouteOutput) + + DeleteRouteTable(*ec2.DeleteRouteTableInput) (*ec2.DeleteRouteTableOutput, error) + DeleteRouteTableWithContext(aws.Context, *ec2.DeleteRouteTableInput, ...request.Option) (*ec2.DeleteRouteTableOutput, error) + DeleteRouteTableRequest(*ec2.DeleteRouteTableInput) (*request.Request, *ec2.DeleteRouteTableOutput) + + DeleteSecurityGroup(*ec2.DeleteSecurityGroupInput) (*ec2.DeleteSecurityGroupOutput, error) + DeleteSecurityGroupWithContext(aws.Context, *ec2.DeleteSecurityGroupInput, ...request.Option) (*ec2.DeleteSecurityGroupOutput, error) + DeleteSecurityGroupRequest(*ec2.DeleteSecurityGroupInput) (*request.Request, *ec2.DeleteSecurityGroupOutput) + + DeleteSnapshot(*ec2.DeleteSnapshotInput) (*ec2.DeleteSnapshotOutput, error) + DeleteSnapshotWithContext(aws.Context, *ec2.DeleteSnapshotInput, ...request.Option) (*ec2.DeleteSnapshotOutput, error) + DeleteSnapshotRequest(*ec2.DeleteSnapshotInput) (*request.Request, *ec2.DeleteSnapshotOutput) + + DeleteSpotDatafeedSubscription(*ec2.DeleteSpotDatafeedSubscriptionInput) (*ec2.DeleteSpotDatafeedSubscriptionOutput, error) + DeleteSpotDatafeedSubscriptionWithContext(aws.Context, *ec2.DeleteSpotDatafeedSubscriptionInput, ...request.Option) (*ec2.DeleteSpotDatafeedSubscriptionOutput, error) + DeleteSpotDatafeedSubscriptionRequest(*ec2.DeleteSpotDatafeedSubscriptionInput) (*request.Request, *ec2.DeleteSpotDatafeedSubscriptionOutput) + + DeleteSubnet(*ec2.DeleteSubnetInput) (*ec2.DeleteSubnetOutput, error) + DeleteSubnetWithContext(aws.Context, 
*ec2.DeleteSubnetInput, ...request.Option) (*ec2.DeleteSubnetOutput, error) + DeleteSubnetRequest(*ec2.DeleteSubnetInput) (*request.Request, *ec2.DeleteSubnetOutput) + + DeleteSubnetCidrReservation(*ec2.DeleteSubnetCidrReservationInput) (*ec2.DeleteSubnetCidrReservationOutput, error) + DeleteSubnetCidrReservationWithContext(aws.Context, *ec2.DeleteSubnetCidrReservationInput, ...request.Option) (*ec2.DeleteSubnetCidrReservationOutput, error) + DeleteSubnetCidrReservationRequest(*ec2.DeleteSubnetCidrReservationInput) (*request.Request, *ec2.DeleteSubnetCidrReservationOutput) + + DeleteTags(*ec2.DeleteTagsInput) (*ec2.DeleteTagsOutput, error) + DeleteTagsWithContext(aws.Context, *ec2.DeleteTagsInput, ...request.Option) (*ec2.DeleteTagsOutput, error) + DeleteTagsRequest(*ec2.DeleteTagsInput) (*request.Request, *ec2.DeleteTagsOutput) + + DeleteTrafficMirrorFilter(*ec2.DeleteTrafficMirrorFilterInput) (*ec2.DeleteTrafficMirrorFilterOutput, error) + DeleteTrafficMirrorFilterWithContext(aws.Context, *ec2.DeleteTrafficMirrorFilterInput, ...request.Option) (*ec2.DeleteTrafficMirrorFilterOutput, error) + DeleteTrafficMirrorFilterRequest(*ec2.DeleteTrafficMirrorFilterInput) (*request.Request, *ec2.DeleteTrafficMirrorFilterOutput) + + DeleteTrafficMirrorFilterRule(*ec2.DeleteTrafficMirrorFilterRuleInput) (*ec2.DeleteTrafficMirrorFilterRuleOutput, error) + DeleteTrafficMirrorFilterRuleWithContext(aws.Context, *ec2.DeleteTrafficMirrorFilterRuleInput, ...request.Option) (*ec2.DeleteTrafficMirrorFilterRuleOutput, error) + DeleteTrafficMirrorFilterRuleRequest(*ec2.DeleteTrafficMirrorFilterRuleInput) (*request.Request, *ec2.DeleteTrafficMirrorFilterRuleOutput) + + DeleteTrafficMirrorSession(*ec2.DeleteTrafficMirrorSessionInput) (*ec2.DeleteTrafficMirrorSessionOutput, error) + DeleteTrafficMirrorSessionWithContext(aws.Context, *ec2.DeleteTrafficMirrorSessionInput, ...request.Option) (*ec2.DeleteTrafficMirrorSessionOutput, error) + DeleteTrafficMirrorSessionRequest(*ec2.DeleteTrafficMirrorSessionInput) (*request.Request, *ec2.DeleteTrafficMirrorSessionOutput) + + DeleteTrafficMirrorTarget(*ec2.DeleteTrafficMirrorTargetInput) (*ec2.DeleteTrafficMirrorTargetOutput, error) + DeleteTrafficMirrorTargetWithContext(aws.Context, *ec2.DeleteTrafficMirrorTargetInput, ...request.Option) (*ec2.DeleteTrafficMirrorTargetOutput, error) + DeleteTrafficMirrorTargetRequest(*ec2.DeleteTrafficMirrorTargetInput) (*request.Request, *ec2.DeleteTrafficMirrorTargetOutput) + + DeleteTransitGateway(*ec2.DeleteTransitGatewayInput) (*ec2.DeleteTransitGatewayOutput, error) + DeleteTransitGatewayWithContext(aws.Context, *ec2.DeleteTransitGatewayInput, ...request.Option) (*ec2.DeleteTransitGatewayOutput, error) + DeleteTransitGatewayRequest(*ec2.DeleteTransitGatewayInput) (*request.Request, *ec2.DeleteTransitGatewayOutput) + + DeleteTransitGatewayConnect(*ec2.DeleteTransitGatewayConnectInput) (*ec2.DeleteTransitGatewayConnectOutput, error) + DeleteTransitGatewayConnectWithContext(aws.Context, *ec2.DeleteTransitGatewayConnectInput, ...request.Option) (*ec2.DeleteTransitGatewayConnectOutput, error) + DeleteTransitGatewayConnectRequest(*ec2.DeleteTransitGatewayConnectInput) (*request.Request, *ec2.DeleteTransitGatewayConnectOutput) + + DeleteTransitGatewayConnectPeer(*ec2.DeleteTransitGatewayConnectPeerInput) (*ec2.DeleteTransitGatewayConnectPeerOutput, error) + DeleteTransitGatewayConnectPeerWithContext(aws.Context, *ec2.DeleteTransitGatewayConnectPeerInput, ...request.Option) (*ec2.DeleteTransitGatewayConnectPeerOutput, error) + 
DeleteTransitGatewayConnectPeerRequest(*ec2.DeleteTransitGatewayConnectPeerInput) (*request.Request, *ec2.DeleteTransitGatewayConnectPeerOutput) + + DeleteTransitGatewayMulticastDomain(*ec2.DeleteTransitGatewayMulticastDomainInput) (*ec2.DeleteTransitGatewayMulticastDomainOutput, error) + DeleteTransitGatewayMulticastDomainWithContext(aws.Context, *ec2.DeleteTransitGatewayMulticastDomainInput, ...request.Option) (*ec2.DeleteTransitGatewayMulticastDomainOutput, error) + DeleteTransitGatewayMulticastDomainRequest(*ec2.DeleteTransitGatewayMulticastDomainInput) (*request.Request, *ec2.DeleteTransitGatewayMulticastDomainOutput) + + DeleteTransitGatewayPeeringAttachment(*ec2.DeleteTransitGatewayPeeringAttachmentInput) (*ec2.DeleteTransitGatewayPeeringAttachmentOutput, error) + DeleteTransitGatewayPeeringAttachmentWithContext(aws.Context, *ec2.DeleteTransitGatewayPeeringAttachmentInput, ...request.Option) (*ec2.DeleteTransitGatewayPeeringAttachmentOutput, error) + DeleteTransitGatewayPeeringAttachmentRequest(*ec2.DeleteTransitGatewayPeeringAttachmentInput) (*request.Request, *ec2.DeleteTransitGatewayPeeringAttachmentOutput) + + DeleteTransitGatewayPolicyTable(*ec2.DeleteTransitGatewayPolicyTableInput) (*ec2.DeleteTransitGatewayPolicyTableOutput, error) + DeleteTransitGatewayPolicyTableWithContext(aws.Context, *ec2.DeleteTransitGatewayPolicyTableInput, ...request.Option) (*ec2.DeleteTransitGatewayPolicyTableOutput, error) + DeleteTransitGatewayPolicyTableRequest(*ec2.DeleteTransitGatewayPolicyTableInput) (*request.Request, *ec2.DeleteTransitGatewayPolicyTableOutput) + + DeleteTransitGatewayPrefixListReference(*ec2.DeleteTransitGatewayPrefixListReferenceInput) (*ec2.DeleteTransitGatewayPrefixListReferenceOutput, error) + DeleteTransitGatewayPrefixListReferenceWithContext(aws.Context, *ec2.DeleteTransitGatewayPrefixListReferenceInput, ...request.Option) (*ec2.DeleteTransitGatewayPrefixListReferenceOutput, error) + DeleteTransitGatewayPrefixListReferenceRequest(*ec2.DeleteTransitGatewayPrefixListReferenceInput) (*request.Request, *ec2.DeleteTransitGatewayPrefixListReferenceOutput) + + DeleteTransitGatewayRoute(*ec2.DeleteTransitGatewayRouteInput) (*ec2.DeleteTransitGatewayRouteOutput, error) + DeleteTransitGatewayRouteWithContext(aws.Context, *ec2.DeleteTransitGatewayRouteInput, ...request.Option) (*ec2.DeleteTransitGatewayRouteOutput, error) + DeleteTransitGatewayRouteRequest(*ec2.DeleteTransitGatewayRouteInput) (*request.Request, *ec2.DeleteTransitGatewayRouteOutput) + + DeleteTransitGatewayRouteTable(*ec2.DeleteTransitGatewayRouteTableInput) (*ec2.DeleteTransitGatewayRouteTableOutput, error) + DeleteTransitGatewayRouteTableWithContext(aws.Context, *ec2.DeleteTransitGatewayRouteTableInput, ...request.Option) (*ec2.DeleteTransitGatewayRouteTableOutput, error) + DeleteTransitGatewayRouteTableRequest(*ec2.DeleteTransitGatewayRouteTableInput) (*request.Request, *ec2.DeleteTransitGatewayRouteTableOutput) + + DeleteTransitGatewayRouteTableAnnouncement(*ec2.DeleteTransitGatewayRouteTableAnnouncementInput) (*ec2.DeleteTransitGatewayRouteTableAnnouncementOutput, error) + DeleteTransitGatewayRouteTableAnnouncementWithContext(aws.Context, *ec2.DeleteTransitGatewayRouteTableAnnouncementInput, ...request.Option) (*ec2.DeleteTransitGatewayRouteTableAnnouncementOutput, error) + DeleteTransitGatewayRouteTableAnnouncementRequest(*ec2.DeleteTransitGatewayRouteTableAnnouncementInput) (*request.Request, *ec2.DeleteTransitGatewayRouteTableAnnouncementOutput) + + 
DeleteTransitGatewayVpcAttachment(*ec2.DeleteTransitGatewayVpcAttachmentInput) (*ec2.DeleteTransitGatewayVpcAttachmentOutput, error) + DeleteTransitGatewayVpcAttachmentWithContext(aws.Context, *ec2.DeleteTransitGatewayVpcAttachmentInput, ...request.Option) (*ec2.DeleteTransitGatewayVpcAttachmentOutput, error) + DeleteTransitGatewayVpcAttachmentRequest(*ec2.DeleteTransitGatewayVpcAttachmentInput) (*request.Request, *ec2.DeleteTransitGatewayVpcAttachmentOutput) + + DeleteVerifiedAccessEndpoint(*ec2.DeleteVerifiedAccessEndpointInput) (*ec2.DeleteVerifiedAccessEndpointOutput, error) + DeleteVerifiedAccessEndpointWithContext(aws.Context, *ec2.DeleteVerifiedAccessEndpointInput, ...request.Option) (*ec2.DeleteVerifiedAccessEndpointOutput, error) + DeleteVerifiedAccessEndpointRequest(*ec2.DeleteVerifiedAccessEndpointInput) (*request.Request, *ec2.DeleteVerifiedAccessEndpointOutput) + + DeleteVerifiedAccessGroup(*ec2.DeleteVerifiedAccessGroupInput) (*ec2.DeleteVerifiedAccessGroupOutput, error) + DeleteVerifiedAccessGroupWithContext(aws.Context, *ec2.DeleteVerifiedAccessGroupInput, ...request.Option) (*ec2.DeleteVerifiedAccessGroupOutput, error) + DeleteVerifiedAccessGroupRequest(*ec2.DeleteVerifiedAccessGroupInput) (*request.Request, *ec2.DeleteVerifiedAccessGroupOutput) + + DeleteVerifiedAccessInstance(*ec2.DeleteVerifiedAccessInstanceInput) (*ec2.DeleteVerifiedAccessInstanceOutput, error) + DeleteVerifiedAccessInstanceWithContext(aws.Context, *ec2.DeleteVerifiedAccessInstanceInput, ...request.Option) (*ec2.DeleteVerifiedAccessInstanceOutput, error) + DeleteVerifiedAccessInstanceRequest(*ec2.DeleteVerifiedAccessInstanceInput) (*request.Request, *ec2.DeleteVerifiedAccessInstanceOutput) + + DeleteVerifiedAccessTrustProvider(*ec2.DeleteVerifiedAccessTrustProviderInput) (*ec2.DeleteVerifiedAccessTrustProviderOutput, error) + DeleteVerifiedAccessTrustProviderWithContext(aws.Context, *ec2.DeleteVerifiedAccessTrustProviderInput, ...request.Option) (*ec2.DeleteVerifiedAccessTrustProviderOutput, error) + DeleteVerifiedAccessTrustProviderRequest(*ec2.DeleteVerifiedAccessTrustProviderInput) (*request.Request, *ec2.DeleteVerifiedAccessTrustProviderOutput) + + DeleteVolume(*ec2.DeleteVolumeInput) (*ec2.DeleteVolumeOutput, error) + DeleteVolumeWithContext(aws.Context, *ec2.DeleteVolumeInput, ...request.Option) (*ec2.DeleteVolumeOutput, error) + DeleteVolumeRequest(*ec2.DeleteVolumeInput) (*request.Request, *ec2.DeleteVolumeOutput) + + DeleteVpc(*ec2.DeleteVpcInput) (*ec2.DeleteVpcOutput, error) + DeleteVpcWithContext(aws.Context, *ec2.DeleteVpcInput, ...request.Option) (*ec2.DeleteVpcOutput, error) + DeleteVpcRequest(*ec2.DeleteVpcInput) (*request.Request, *ec2.DeleteVpcOutput) + + DeleteVpcEndpointConnectionNotifications(*ec2.DeleteVpcEndpointConnectionNotificationsInput) (*ec2.DeleteVpcEndpointConnectionNotificationsOutput, error) + DeleteVpcEndpointConnectionNotificationsWithContext(aws.Context, *ec2.DeleteVpcEndpointConnectionNotificationsInput, ...request.Option) (*ec2.DeleteVpcEndpointConnectionNotificationsOutput, error) + DeleteVpcEndpointConnectionNotificationsRequest(*ec2.DeleteVpcEndpointConnectionNotificationsInput) (*request.Request, *ec2.DeleteVpcEndpointConnectionNotificationsOutput) + + DeleteVpcEndpointServiceConfigurations(*ec2.DeleteVpcEndpointServiceConfigurationsInput) (*ec2.DeleteVpcEndpointServiceConfigurationsOutput, error) + DeleteVpcEndpointServiceConfigurationsWithContext(aws.Context, *ec2.DeleteVpcEndpointServiceConfigurationsInput, ...request.Option) 
(*ec2.DeleteVpcEndpointServiceConfigurationsOutput, error) + DeleteVpcEndpointServiceConfigurationsRequest(*ec2.DeleteVpcEndpointServiceConfigurationsInput) (*request.Request, *ec2.DeleteVpcEndpointServiceConfigurationsOutput) + + DeleteVpcEndpoints(*ec2.DeleteVpcEndpointsInput) (*ec2.DeleteVpcEndpointsOutput, error) + DeleteVpcEndpointsWithContext(aws.Context, *ec2.DeleteVpcEndpointsInput, ...request.Option) (*ec2.DeleteVpcEndpointsOutput, error) + DeleteVpcEndpointsRequest(*ec2.DeleteVpcEndpointsInput) (*request.Request, *ec2.DeleteVpcEndpointsOutput) + + DeleteVpcPeeringConnection(*ec2.DeleteVpcPeeringConnectionInput) (*ec2.DeleteVpcPeeringConnectionOutput, error) + DeleteVpcPeeringConnectionWithContext(aws.Context, *ec2.DeleteVpcPeeringConnectionInput, ...request.Option) (*ec2.DeleteVpcPeeringConnectionOutput, error) + DeleteVpcPeeringConnectionRequest(*ec2.DeleteVpcPeeringConnectionInput) (*request.Request, *ec2.DeleteVpcPeeringConnectionOutput) + + DeleteVpnConnection(*ec2.DeleteVpnConnectionInput) (*ec2.DeleteVpnConnectionOutput, error) + DeleteVpnConnectionWithContext(aws.Context, *ec2.DeleteVpnConnectionInput, ...request.Option) (*ec2.DeleteVpnConnectionOutput, error) + DeleteVpnConnectionRequest(*ec2.DeleteVpnConnectionInput) (*request.Request, *ec2.DeleteVpnConnectionOutput) + + DeleteVpnConnectionRoute(*ec2.DeleteVpnConnectionRouteInput) (*ec2.DeleteVpnConnectionRouteOutput, error) + DeleteVpnConnectionRouteWithContext(aws.Context, *ec2.DeleteVpnConnectionRouteInput, ...request.Option) (*ec2.DeleteVpnConnectionRouteOutput, error) + DeleteVpnConnectionRouteRequest(*ec2.DeleteVpnConnectionRouteInput) (*request.Request, *ec2.DeleteVpnConnectionRouteOutput) + + DeleteVpnGateway(*ec2.DeleteVpnGatewayInput) (*ec2.DeleteVpnGatewayOutput, error) + DeleteVpnGatewayWithContext(aws.Context, *ec2.DeleteVpnGatewayInput, ...request.Option) (*ec2.DeleteVpnGatewayOutput, error) + DeleteVpnGatewayRequest(*ec2.DeleteVpnGatewayInput) (*request.Request, *ec2.DeleteVpnGatewayOutput) + + DeprovisionByoipCidr(*ec2.DeprovisionByoipCidrInput) (*ec2.DeprovisionByoipCidrOutput, error) + DeprovisionByoipCidrWithContext(aws.Context, *ec2.DeprovisionByoipCidrInput, ...request.Option) (*ec2.DeprovisionByoipCidrOutput, error) + DeprovisionByoipCidrRequest(*ec2.DeprovisionByoipCidrInput) (*request.Request, *ec2.DeprovisionByoipCidrOutput) + + DeprovisionIpamByoasn(*ec2.DeprovisionIpamByoasnInput) (*ec2.DeprovisionIpamByoasnOutput, error) + DeprovisionIpamByoasnWithContext(aws.Context, *ec2.DeprovisionIpamByoasnInput, ...request.Option) (*ec2.DeprovisionIpamByoasnOutput, error) + DeprovisionIpamByoasnRequest(*ec2.DeprovisionIpamByoasnInput) (*request.Request, *ec2.DeprovisionIpamByoasnOutput) + + DeprovisionIpamPoolCidr(*ec2.DeprovisionIpamPoolCidrInput) (*ec2.DeprovisionIpamPoolCidrOutput, error) + DeprovisionIpamPoolCidrWithContext(aws.Context, *ec2.DeprovisionIpamPoolCidrInput, ...request.Option) (*ec2.DeprovisionIpamPoolCidrOutput, error) + DeprovisionIpamPoolCidrRequest(*ec2.DeprovisionIpamPoolCidrInput) (*request.Request, *ec2.DeprovisionIpamPoolCidrOutput) + + DeprovisionPublicIpv4PoolCidr(*ec2.DeprovisionPublicIpv4PoolCidrInput) (*ec2.DeprovisionPublicIpv4PoolCidrOutput, error) + DeprovisionPublicIpv4PoolCidrWithContext(aws.Context, *ec2.DeprovisionPublicIpv4PoolCidrInput, ...request.Option) (*ec2.DeprovisionPublicIpv4PoolCidrOutput, error) + DeprovisionPublicIpv4PoolCidrRequest(*ec2.DeprovisionPublicIpv4PoolCidrInput) (*request.Request, *ec2.DeprovisionPublicIpv4PoolCidrOutput) + + 
DeregisterImage(*ec2.DeregisterImageInput) (*ec2.DeregisterImageOutput, error) + DeregisterImageWithContext(aws.Context, *ec2.DeregisterImageInput, ...request.Option) (*ec2.DeregisterImageOutput, error) + DeregisterImageRequest(*ec2.DeregisterImageInput) (*request.Request, *ec2.DeregisterImageOutput) + + DeregisterInstanceEventNotificationAttributes(*ec2.DeregisterInstanceEventNotificationAttributesInput) (*ec2.DeregisterInstanceEventNotificationAttributesOutput, error) + DeregisterInstanceEventNotificationAttributesWithContext(aws.Context, *ec2.DeregisterInstanceEventNotificationAttributesInput, ...request.Option) (*ec2.DeregisterInstanceEventNotificationAttributesOutput, error) + DeregisterInstanceEventNotificationAttributesRequest(*ec2.DeregisterInstanceEventNotificationAttributesInput) (*request.Request, *ec2.DeregisterInstanceEventNotificationAttributesOutput) + + DeregisterTransitGatewayMulticastGroupMembers(*ec2.DeregisterTransitGatewayMulticastGroupMembersInput) (*ec2.DeregisterTransitGatewayMulticastGroupMembersOutput, error) + DeregisterTransitGatewayMulticastGroupMembersWithContext(aws.Context, *ec2.DeregisterTransitGatewayMulticastGroupMembersInput, ...request.Option) (*ec2.DeregisterTransitGatewayMulticastGroupMembersOutput, error) + DeregisterTransitGatewayMulticastGroupMembersRequest(*ec2.DeregisterTransitGatewayMulticastGroupMembersInput) (*request.Request, *ec2.DeregisterTransitGatewayMulticastGroupMembersOutput) + + DeregisterTransitGatewayMulticastGroupSources(*ec2.DeregisterTransitGatewayMulticastGroupSourcesInput) (*ec2.DeregisterTransitGatewayMulticastGroupSourcesOutput, error) + DeregisterTransitGatewayMulticastGroupSourcesWithContext(aws.Context, *ec2.DeregisterTransitGatewayMulticastGroupSourcesInput, ...request.Option) (*ec2.DeregisterTransitGatewayMulticastGroupSourcesOutput, error) + DeregisterTransitGatewayMulticastGroupSourcesRequest(*ec2.DeregisterTransitGatewayMulticastGroupSourcesInput) (*request.Request, *ec2.DeregisterTransitGatewayMulticastGroupSourcesOutput) + + DescribeAccountAttributes(*ec2.DescribeAccountAttributesInput) (*ec2.DescribeAccountAttributesOutput, error) + DescribeAccountAttributesWithContext(aws.Context, *ec2.DescribeAccountAttributesInput, ...request.Option) (*ec2.DescribeAccountAttributesOutput, error) + DescribeAccountAttributesRequest(*ec2.DescribeAccountAttributesInput) (*request.Request, *ec2.DescribeAccountAttributesOutput) + + DescribeAddressTransfers(*ec2.DescribeAddressTransfersInput) (*ec2.DescribeAddressTransfersOutput, error) + DescribeAddressTransfersWithContext(aws.Context, *ec2.DescribeAddressTransfersInput, ...request.Option) (*ec2.DescribeAddressTransfersOutput, error) + DescribeAddressTransfersRequest(*ec2.DescribeAddressTransfersInput) (*request.Request, *ec2.DescribeAddressTransfersOutput) + + DescribeAddressTransfersPages(*ec2.DescribeAddressTransfersInput, func(*ec2.DescribeAddressTransfersOutput, bool) bool) error + DescribeAddressTransfersPagesWithContext(aws.Context, *ec2.DescribeAddressTransfersInput, func(*ec2.DescribeAddressTransfersOutput, bool) bool, ...request.Option) error + + DescribeAddresses(*ec2.DescribeAddressesInput) (*ec2.DescribeAddressesOutput, error) + DescribeAddressesWithContext(aws.Context, *ec2.DescribeAddressesInput, ...request.Option) (*ec2.DescribeAddressesOutput, error) + DescribeAddressesRequest(*ec2.DescribeAddressesInput) (*request.Request, *ec2.DescribeAddressesOutput) + + DescribeAddressesAttribute(*ec2.DescribeAddressesAttributeInput) (*ec2.DescribeAddressesAttributeOutput, error) 
+ DescribeAddressesAttributeWithContext(aws.Context, *ec2.DescribeAddressesAttributeInput, ...request.Option) (*ec2.DescribeAddressesAttributeOutput, error) + DescribeAddressesAttributeRequest(*ec2.DescribeAddressesAttributeInput) (*request.Request, *ec2.DescribeAddressesAttributeOutput) + + DescribeAddressesAttributePages(*ec2.DescribeAddressesAttributeInput, func(*ec2.DescribeAddressesAttributeOutput, bool) bool) error + DescribeAddressesAttributePagesWithContext(aws.Context, *ec2.DescribeAddressesAttributeInput, func(*ec2.DescribeAddressesAttributeOutput, bool) bool, ...request.Option) error + + DescribeAggregateIdFormat(*ec2.DescribeAggregateIdFormatInput) (*ec2.DescribeAggregateIdFormatOutput, error) + DescribeAggregateIdFormatWithContext(aws.Context, *ec2.DescribeAggregateIdFormatInput, ...request.Option) (*ec2.DescribeAggregateIdFormatOutput, error) + DescribeAggregateIdFormatRequest(*ec2.DescribeAggregateIdFormatInput) (*request.Request, *ec2.DescribeAggregateIdFormatOutput) + + DescribeAvailabilityZones(*ec2.DescribeAvailabilityZonesInput) (*ec2.DescribeAvailabilityZonesOutput, error) + DescribeAvailabilityZonesWithContext(aws.Context, *ec2.DescribeAvailabilityZonesInput, ...request.Option) (*ec2.DescribeAvailabilityZonesOutput, error) + DescribeAvailabilityZonesRequest(*ec2.DescribeAvailabilityZonesInput) (*request.Request, *ec2.DescribeAvailabilityZonesOutput) + + DescribeAwsNetworkPerformanceMetricSubscriptions(*ec2.DescribeAwsNetworkPerformanceMetricSubscriptionsInput) (*ec2.DescribeAwsNetworkPerformanceMetricSubscriptionsOutput, error) + DescribeAwsNetworkPerformanceMetricSubscriptionsWithContext(aws.Context, *ec2.DescribeAwsNetworkPerformanceMetricSubscriptionsInput, ...request.Option) (*ec2.DescribeAwsNetworkPerformanceMetricSubscriptionsOutput, error) + DescribeAwsNetworkPerformanceMetricSubscriptionsRequest(*ec2.DescribeAwsNetworkPerformanceMetricSubscriptionsInput) (*request.Request, *ec2.DescribeAwsNetworkPerformanceMetricSubscriptionsOutput) + + DescribeAwsNetworkPerformanceMetricSubscriptionsPages(*ec2.DescribeAwsNetworkPerformanceMetricSubscriptionsInput, func(*ec2.DescribeAwsNetworkPerformanceMetricSubscriptionsOutput, bool) bool) error + DescribeAwsNetworkPerformanceMetricSubscriptionsPagesWithContext(aws.Context, *ec2.DescribeAwsNetworkPerformanceMetricSubscriptionsInput, func(*ec2.DescribeAwsNetworkPerformanceMetricSubscriptionsOutput, bool) bool, ...request.Option) error + + DescribeBundleTasks(*ec2.DescribeBundleTasksInput) (*ec2.DescribeBundleTasksOutput, error) + DescribeBundleTasksWithContext(aws.Context, *ec2.DescribeBundleTasksInput, ...request.Option) (*ec2.DescribeBundleTasksOutput, error) + DescribeBundleTasksRequest(*ec2.DescribeBundleTasksInput) (*request.Request, *ec2.DescribeBundleTasksOutput) + + DescribeByoipCidrs(*ec2.DescribeByoipCidrsInput) (*ec2.DescribeByoipCidrsOutput, error) + DescribeByoipCidrsWithContext(aws.Context, *ec2.DescribeByoipCidrsInput, ...request.Option) (*ec2.DescribeByoipCidrsOutput, error) + DescribeByoipCidrsRequest(*ec2.DescribeByoipCidrsInput) (*request.Request, *ec2.DescribeByoipCidrsOutput) + + DescribeByoipCidrsPages(*ec2.DescribeByoipCidrsInput, func(*ec2.DescribeByoipCidrsOutput, bool) bool) error + DescribeByoipCidrsPagesWithContext(aws.Context, *ec2.DescribeByoipCidrsInput, func(*ec2.DescribeByoipCidrsOutput, bool) bool, ...request.Option) error + + DescribeCapacityBlockOfferings(*ec2.DescribeCapacityBlockOfferingsInput) (*ec2.DescribeCapacityBlockOfferingsOutput, error) + 
DescribeCapacityBlockOfferingsWithContext(aws.Context, *ec2.DescribeCapacityBlockOfferingsInput, ...request.Option) (*ec2.DescribeCapacityBlockOfferingsOutput, error) + DescribeCapacityBlockOfferingsRequest(*ec2.DescribeCapacityBlockOfferingsInput) (*request.Request, *ec2.DescribeCapacityBlockOfferingsOutput) + + DescribeCapacityBlockOfferingsPages(*ec2.DescribeCapacityBlockOfferingsInput, func(*ec2.DescribeCapacityBlockOfferingsOutput, bool) bool) error + DescribeCapacityBlockOfferingsPagesWithContext(aws.Context, *ec2.DescribeCapacityBlockOfferingsInput, func(*ec2.DescribeCapacityBlockOfferingsOutput, bool) bool, ...request.Option) error + + DescribeCapacityReservationFleets(*ec2.DescribeCapacityReservationFleetsInput) (*ec2.DescribeCapacityReservationFleetsOutput, error) + DescribeCapacityReservationFleetsWithContext(aws.Context, *ec2.DescribeCapacityReservationFleetsInput, ...request.Option) (*ec2.DescribeCapacityReservationFleetsOutput, error) + DescribeCapacityReservationFleetsRequest(*ec2.DescribeCapacityReservationFleetsInput) (*request.Request, *ec2.DescribeCapacityReservationFleetsOutput) + + DescribeCapacityReservationFleetsPages(*ec2.DescribeCapacityReservationFleetsInput, func(*ec2.DescribeCapacityReservationFleetsOutput, bool) bool) error + DescribeCapacityReservationFleetsPagesWithContext(aws.Context, *ec2.DescribeCapacityReservationFleetsInput, func(*ec2.DescribeCapacityReservationFleetsOutput, bool) bool, ...request.Option) error + + DescribeCapacityReservations(*ec2.DescribeCapacityReservationsInput) (*ec2.DescribeCapacityReservationsOutput, error) + DescribeCapacityReservationsWithContext(aws.Context, *ec2.DescribeCapacityReservationsInput, ...request.Option) (*ec2.DescribeCapacityReservationsOutput, error) + DescribeCapacityReservationsRequest(*ec2.DescribeCapacityReservationsInput) (*request.Request, *ec2.DescribeCapacityReservationsOutput) + + DescribeCapacityReservationsPages(*ec2.DescribeCapacityReservationsInput, func(*ec2.DescribeCapacityReservationsOutput, bool) bool) error + DescribeCapacityReservationsPagesWithContext(aws.Context, *ec2.DescribeCapacityReservationsInput, func(*ec2.DescribeCapacityReservationsOutput, bool) bool, ...request.Option) error + + DescribeCarrierGateways(*ec2.DescribeCarrierGatewaysInput) (*ec2.DescribeCarrierGatewaysOutput, error) + DescribeCarrierGatewaysWithContext(aws.Context, *ec2.DescribeCarrierGatewaysInput, ...request.Option) (*ec2.DescribeCarrierGatewaysOutput, error) + DescribeCarrierGatewaysRequest(*ec2.DescribeCarrierGatewaysInput) (*request.Request, *ec2.DescribeCarrierGatewaysOutput) + + DescribeCarrierGatewaysPages(*ec2.DescribeCarrierGatewaysInput, func(*ec2.DescribeCarrierGatewaysOutput, bool) bool) error + DescribeCarrierGatewaysPagesWithContext(aws.Context, *ec2.DescribeCarrierGatewaysInput, func(*ec2.DescribeCarrierGatewaysOutput, bool) bool, ...request.Option) error + + DescribeClassicLinkInstances(*ec2.DescribeClassicLinkInstancesInput) (*ec2.DescribeClassicLinkInstancesOutput, error) + DescribeClassicLinkInstancesWithContext(aws.Context, *ec2.DescribeClassicLinkInstancesInput, ...request.Option) (*ec2.DescribeClassicLinkInstancesOutput, error) + DescribeClassicLinkInstancesRequest(*ec2.DescribeClassicLinkInstancesInput) (*request.Request, *ec2.DescribeClassicLinkInstancesOutput) + + DescribeClassicLinkInstancesPages(*ec2.DescribeClassicLinkInstancesInput, func(*ec2.DescribeClassicLinkInstancesOutput, bool) bool) error + DescribeClassicLinkInstancesPagesWithContext(aws.Context, 
*ec2.DescribeClassicLinkInstancesInput, func(*ec2.DescribeClassicLinkInstancesOutput, bool) bool, ...request.Option) error + + DescribeClientVpnAuthorizationRules(*ec2.DescribeClientVpnAuthorizationRulesInput) (*ec2.DescribeClientVpnAuthorizationRulesOutput, error) + DescribeClientVpnAuthorizationRulesWithContext(aws.Context, *ec2.DescribeClientVpnAuthorizationRulesInput, ...request.Option) (*ec2.DescribeClientVpnAuthorizationRulesOutput, error) + DescribeClientVpnAuthorizationRulesRequest(*ec2.DescribeClientVpnAuthorizationRulesInput) (*request.Request, *ec2.DescribeClientVpnAuthorizationRulesOutput) + + DescribeClientVpnAuthorizationRulesPages(*ec2.DescribeClientVpnAuthorizationRulesInput, func(*ec2.DescribeClientVpnAuthorizationRulesOutput, bool) bool) error + DescribeClientVpnAuthorizationRulesPagesWithContext(aws.Context, *ec2.DescribeClientVpnAuthorizationRulesInput, func(*ec2.DescribeClientVpnAuthorizationRulesOutput, bool) bool, ...request.Option) error + + DescribeClientVpnConnections(*ec2.DescribeClientVpnConnectionsInput) (*ec2.DescribeClientVpnConnectionsOutput, error) + DescribeClientVpnConnectionsWithContext(aws.Context, *ec2.DescribeClientVpnConnectionsInput, ...request.Option) (*ec2.DescribeClientVpnConnectionsOutput, error) + DescribeClientVpnConnectionsRequest(*ec2.DescribeClientVpnConnectionsInput) (*request.Request, *ec2.DescribeClientVpnConnectionsOutput) + + DescribeClientVpnConnectionsPages(*ec2.DescribeClientVpnConnectionsInput, func(*ec2.DescribeClientVpnConnectionsOutput, bool) bool) error + DescribeClientVpnConnectionsPagesWithContext(aws.Context, *ec2.DescribeClientVpnConnectionsInput, func(*ec2.DescribeClientVpnConnectionsOutput, bool) bool, ...request.Option) error + + DescribeClientVpnEndpoints(*ec2.DescribeClientVpnEndpointsInput) (*ec2.DescribeClientVpnEndpointsOutput, error) + DescribeClientVpnEndpointsWithContext(aws.Context, *ec2.DescribeClientVpnEndpointsInput, ...request.Option) (*ec2.DescribeClientVpnEndpointsOutput, error) + DescribeClientVpnEndpointsRequest(*ec2.DescribeClientVpnEndpointsInput) (*request.Request, *ec2.DescribeClientVpnEndpointsOutput) + + DescribeClientVpnEndpointsPages(*ec2.DescribeClientVpnEndpointsInput, func(*ec2.DescribeClientVpnEndpointsOutput, bool) bool) error + DescribeClientVpnEndpointsPagesWithContext(aws.Context, *ec2.DescribeClientVpnEndpointsInput, func(*ec2.DescribeClientVpnEndpointsOutput, bool) bool, ...request.Option) error + + DescribeClientVpnRoutes(*ec2.DescribeClientVpnRoutesInput) (*ec2.DescribeClientVpnRoutesOutput, error) + DescribeClientVpnRoutesWithContext(aws.Context, *ec2.DescribeClientVpnRoutesInput, ...request.Option) (*ec2.DescribeClientVpnRoutesOutput, error) + DescribeClientVpnRoutesRequest(*ec2.DescribeClientVpnRoutesInput) (*request.Request, *ec2.DescribeClientVpnRoutesOutput) + + DescribeClientVpnRoutesPages(*ec2.DescribeClientVpnRoutesInput, func(*ec2.DescribeClientVpnRoutesOutput, bool) bool) error + DescribeClientVpnRoutesPagesWithContext(aws.Context, *ec2.DescribeClientVpnRoutesInput, func(*ec2.DescribeClientVpnRoutesOutput, bool) bool, ...request.Option) error + + DescribeClientVpnTargetNetworks(*ec2.DescribeClientVpnTargetNetworksInput) (*ec2.DescribeClientVpnTargetNetworksOutput, error) + DescribeClientVpnTargetNetworksWithContext(aws.Context, *ec2.DescribeClientVpnTargetNetworksInput, ...request.Option) (*ec2.DescribeClientVpnTargetNetworksOutput, error) + DescribeClientVpnTargetNetworksRequest(*ec2.DescribeClientVpnTargetNetworksInput) (*request.Request, 
*ec2.DescribeClientVpnTargetNetworksOutput) + + DescribeClientVpnTargetNetworksPages(*ec2.DescribeClientVpnTargetNetworksInput, func(*ec2.DescribeClientVpnTargetNetworksOutput, bool) bool) error + DescribeClientVpnTargetNetworksPagesWithContext(aws.Context, *ec2.DescribeClientVpnTargetNetworksInput, func(*ec2.DescribeClientVpnTargetNetworksOutput, bool) bool, ...request.Option) error + + DescribeCoipPools(*ec2.DescribeCoipPoolsInput) (*ec2.DescribeCoipPoolsOutput, error) + DescribeCoipPoolsWithContext(aws.Context, *ec2.DescribeCoipPoolsInput, ...request.Option) (*ec2.DescribeCoipPoolsOutput, error) + DescribeCoipPoolsRequest(*ec2.DescribeCoipPoolsInput) (*request.Request, *ec2.DescribeCoipPoolsOutput) + + DescribeCoipPoolsPages(*ec2.DescribeCoipPoolsInput, func(*ec2.DescribeCoipPoolsOutput, bool) bool) error + DescribeCoipPoolsPagesWithContext(aws.Context, *ec2.DescribeCoipPoolsInput, func(*ec2.DescribeCoipPoolsOutput, bool) bool, ...request.Option) error + + DescribeConversionTasks(*ec2.DescribeConversionTasksInput) (*ec2.DescribeConversionTasksOutput, error) + DescribeConversionTasksWithContext(aws.Context, *ec2.DescribeConversionTasksInput, ...request.Option) (*ec2.DescribeConversionTasksOutput, error) + DescribeConversionTasksRequest(*ec2.DescribeConversionTasksInput) (*request.Request, *ec2.DescribeConversionTasksOutput) + + DescribeCustomerGateways(*ec2.DescribeCustomerGatewaysInput) (*ec2.DescribeCustomerGatewaysOutput, error) + DescribeCustomerGatewaysWithContext(aws.Context, *ec2.DescribeCustomerGatewaysInput, ...request.Option) (*ec2.DescribeCustomerGatewaysOutput, error) + DescribeCustomerGatewaysRequest(*ec2.DescribeCustomerGatewaysInput) (*request.Request, *ec2.DescribeCustomerGatewaysOutput) + + DescribeDhcpOptions(*ec2.DescribeDhcpOptionsInput) (*ec2.DescribeDhcpOptionsOutput, error) + DescribeDhcpOptionsWithContext(aws.Context, *ec2.DescribeDhcpOptionsInput, ...request.Option) (*ec2.DescribeDhcpOptionsOutput, error) + DescribeDhcpOptionsRequest(*ec2.DescribeDhcpOptionsInput) (*request.Request, *ec2.DescribeDhcpOptionsOutput) + + DescribeDhcpOptionsPages(*ec2.DescribeDhcpOptionsInput, func(*ec2.DescribeDhcpOptionsOutput, bool) bool) error + DescribeDhcpOptionsPagesWithContext(aws.Context, *ec2.DescribeDhcpOptionsInput, func(*ec2.DescribeDhcpOptionsOutput, bool) bool, ...request.Option) error + + DescribeEgressOnlyInternetGateways(*ec2.DescribeEgressOnlyInternetGatewaysInput) (*ec2.DescribeEgressOnlyInternetGatewaysOutput, error) + DescribeEgressOnlyInternetGatewaysWithContext(aws.Context, *ec2.DescribeEgressOnlyInternetGatewaysInput, ...request.Option) (*ec2.DescribeEgressOnlyInternetGatewaysOutput, error) + DescribeEgressOnlyInternetGatewaysRequest(*ec2.DescribeEgressOnlyInternetGatewaysInput) (*request.Request, *ec2.DescribeEgressOnlyInternetGatewaysOutput) + + DescribeEgressOnlyInternetGatewaysPages(*ec2.DescribeEgressOnlyInternetGatewaysInput, func(*ec2.DescribeEgressOnlyInternetGatewaysOutput, bool) bool) error + DescribeEgressOnlyInternetGatewaysPagesWithContext(aws.Context, *ec2.DescribeEgressOnlyInternetGatewaysInput, func(*ec2.DescribeEgressOnlyInternetGatewaysOutput, bool) bool, ...request.Option) error + + DescribeElasticGpus(*ec2.DescribeElasticGpusInput) (*ec2.DescribeElasticGpusOutput, error) + DescribeElasticGpusWithContext(aws.Context, *ec2.DescribeElasticGpusInput, ...request.Option) (*ec2.DescribeElasticGpusOutput, error) + DescribeElasticGpusRequest(*ec2.DescribeElasticGpusInput) (*request.Request, *ec2.DescribeElasticGpusOutput) + + 
DescribeExportImageTasks(*ec2.DescribeExportImageTasksInput) (*ec2.DescribeExportImageTasksOutput, error) + DescribeExportImageTasksWithContext(aws.Context, *ec2.DescribeExportImageTasksInput, ...request.Option) (*ec2.DescribeExportImageTasksOutput, error) + DescribeExportImageTasksRequest(*ec2.DescribeExportImageTasksInput) (*request.Request, *ec2.DescribeExportImageTasksOutput) + + DescribeExportImageTasksPages(*ec2.DescribeExportImageTasksInput, func(*ec2.DescribeExportImageTasksOutput, bool) bool) error + DescribeExportImageTasksPagesWithContext(aws.Context, *ec2.DescribeExportImageTasksInput, func(*ec2.DescribeExportImageTasksOutput, bool) bool, ...request.Option) error + + DescribeExportTasks(*ec2.DescribeExportTasksInput) (*ec2.DescribeExportTasksOutput, error) + DescribeExportTasksWithContext(aws.Context, *ec2.DescribeExportTasksInput, ...request.Option) (*ec2.DescribeExportTasksOutput, error) + DescribeExportTasksRequest(*ec2.DescribeExportTasksInput) (*request.Request, *ec2.DescribeExportTasksOutput) + + DescribeFastLaunchImages(*ec2.DescribeFastLaunchImagesInput) (*ec2.DescribeFastLaunchImagesOutput, error) + DescribeFastLaunchImagesWithContext(aws.Context, *ec2.DescribeFastLaunchImagesInput, ...request.Option) (*ec2.DescribeFastLaunchImagesOutput, error) + DescribeFastLaunchImagesRequest(*ec2.DescribeFastLaunchImagesInput) (*request.Request, *ec2.DescribeFastLaunchImagesOutput) + + DescribeFastLaunchImagesPages(*ec2.DescribeFastLaunchImagesInput, func(*ec2.DescribeFastLaunchImagesOutput, bool) bool) error + DescribeFastLaunchImagesPagesWithContext(aws.Context, *ec2.DescribeFastLaunchImagesInput, func(*ec2.DescribeFastLaunchImagesOutput, bool) bool, ...request.Option) error + + DescribeFastSnapshotRestores(*ec2.DescribeFastSnapshotRestoresInput) (*ec2.DescribeFastSnapshotRestoresOutput, error) + DescribeFastSnapshotRestoresWithContext(aws.Context, *ec2.DescribeFastSnapshotRestoresInput, ...request.Option) (*ec2.DescribeFastSnapshotRestoresOutput, error) + DescribeFastSnapshotRestoresRequest(*ec2.DescribeFastSnapshotRestoresInput) (*request.Request, *ec2.DescribeFastSnapshotRestoresOutput) + + DescribeFastSnapshotRestoresPages(*ec2.DescribeFastSnapshotRestoresInput, func(*ec2.DescribeFastSnapshotRestoresOutput, bool) bool) error + DescribeFastSnapshotRestoresPagesWithContext(aws.Context, *ec2.DescribeFastSnapshotRestoresInput, func(*ec2.DescribeFastSnapshotRestoresOutput, bool) bool, ...request.Option) error + + DescribeFleetHistory(*ec2.DescribeFleetHistoryInput) (*ec2.DescribeFleetHistoryOutput, error) + DescribeFleetHistoryWithContext(aws.Context, *ec2.DescribeFleetHistoryInput, ...request.Option) (*ec2.DescribeFleetHistoryOutput, error) + DescribeFleetHistoryRequest(*ec2.DescribeFleetHistoryInput) (*request.Request, *ec2.DescribeFleetHistoryOutput) + + DescribeFleetInstances(*ec2.DescribeFleetInstancesInput) (*ec2.DescribeFleetInstancesOutput, error) + DescribeFleetInstancesWithContext(aws.Context, *ec2.DescribeFleetInstancesInput, ...request.Option) (*ec2.DescribeFleetInstancesOutput, error) + DescribeFleetInstancesRequest(*ec2.DescribeFleetInstancesInput) (*request.Request, *ec2.DescribeFleetInstancesOutput) + + DescribeFleets(*ec2.DescribeFleetsInput) (*ec2.DescribeFleetsOutput, error) + DescribeFleetsWithContext(aws.Context, *ec2.DescribeFleetsInput, ...request.Option) (*ec2.DescribeFleetsOutput, error) + DescribeFleetsRequest(*ec2.DescribeFleetsInput) (*request.Request, *ec2.DescribeFleetsOutput) + + DescribeFleetsPages(*ec2.DescribeFleetsInput, 
func(*ec2.DescribeFleetsOutput, bool) bool) error + DescribeFleetsPagesWithContext(aws.Context, *ec2.DescribeFleetsInput, func(*ec2.DescribeFleetsOutput, bool) bool, ...request.Option) error + + DescribeFlowLogs(*ec2.DescribeFlowLogsInput) (*ec2.DescribeFlowLogsOutput, error) + DescribeFlowLogsWithContext(aws.Context, *ec2.DescribeFlowLogsInput, ...request.Option) (*ec2.DescribeFlowLogsOutput, error) + DescribeFlowLogsRequest(*ec2.DescribeFlowLogsInput) (*request.Request, *ec2.DescribeFlowLogsOutput) + + DescribeFlowLogsPages(*ec2.DescribeFlowLogsInput, func(*ec2.DescribeFlowLogsOutput, bool) bool) error + DescribeFlowLogsPagesWithContext(aws.Context, *ec2.DescribeFlowLogsInput, func(*ec2.DescribeFlowLogsOutput, bool) bool, ...request.Option) error + + DescribeFpgaImageAttribute(*ec2.DescribeFpgaImageAttributeInput) (*ec2.DescribeFpgaImageAttributeOutput, error) + DescribeFpgaImageAttributeWithContext(aws.Context, *ec2.DescribeFpgaImageAttributeInput, ...request.Option) (*ec2.DescribeFpgaImageAttributeOutput, error) + DescribeFpgaImageAttributeRequest(*ec2.DescribeFpgaImageAttributeInput) (*request.Request, *ec2.DescribeFpgaImageAttributeOutput) + + DescribeFpgaImages(*ec2.DescribeFpgaImagesInput) (*ec2.DescribeFpgaImagesOutput, error) + DescribeFpgaImagesWithContext(aws.Context, *ec2.DescribeFpgaImagesInput, ...request.Option) (*ec2.DescribeFpgaImagesOutput, error) + DescribeFpgaImagesRequest(*ec2.DescribeFpgaImagesInput) (*request.Request, *ec2.DescribeFpgaImagesOutput) + + DescribeFpgaImagesPages(*ec2.DescribeFpgaImagesInput, func(*ec2.DescribeFpgaImagesOutput, bool) bool) error + DescribeFpgaImagesPagesWithContext(aws.Context, *ec2.DescribeFpgaImagesInput, func(*ec2.DescribeFpgaImagesOutput, bool) bool, ...request.Option) error + + DescribeHostReservationOfferings(*ec2.DescribeHostReservationOfferingsInput) (*ec2.DescribeHostReservationOfferingsOutput, error) + DescribeHostReservationOfferingsWithContext(aws.Context, *ec2.DescribeHostReservationOfferingsInput, ...request.Option) (*ec2.DescribeHostReservationOfferingsOutput, error) + DescribeHostReservationOfferingsRequest(*ec2.DescribeHostReservationOfferingsInput) (*request.Request, *ec2.DescribeHostReservationOfferingsOutput) + + DescribeHostReservationOfferingsPages(*ec2.DescribeHostReservationOfferingsInput, func(*ec2.DescribeHostReservationOfferingsOutput, bool) bool) error + DescribeHostReservationOfferingsPagesWithContext(aws.Context, *ec2.DescribeHostReservationOfferingsInput, func(*ec2.DescribeHostReservationOfferingsOutput, bool) bool, ...request.Option) error + + DescribeHostReservations(*ec2.DescribeHostReservationsInput) (*ec2.DescribeHostReservationsOutput, error) + DescribeHostReservationsWithContext(aws.Context, *ec2.DescribeHostReservationsInput, ...request.Option) (*ec2.DescribeHostReservationsOutput, error) + DescribeHostReservationsRequest(*ec2.DescribeHostReservationsInput) (*request.Request, *ec2.DescribeHostReservationsOutput) + + DescribeHostReservationsPages(*ec2.DescribeHostReservationsInput, func(*ec2.DescribeHostReservationsOutput, bool) bool) error + DescribeHostReservationsPagesWithContext(aws.Context, *ec2.DescribeHostReservationsInput, func(*ec2.DescribeHostReservationsOutput, bool) bool, ...request.Option) error + + DescribeHosts(*ec2.DescribeHostsInput) (*ec2.DescribeHostsOutput, error) + DescribeHostsWithContext(aws.Context, *ec2.DescribeHostsInput, ...request.Option) (*ec2.DescribeHostsOutput, error) + DescribeHostsRequest(*ec2.DescribeHostsInput) (*request.Request, *ec2.DescribeHostsOutput) + + 
DescribeHostsPages(*ec2.DescribeHostsInput, func(*ec2.DescribeHostsOutput, bool) bool) error + DescribeHostsPagesWithContext(aws.Context, *ec2.DescribeHostsInput, func(*ec2.DescribeHostsOutput, bool) bool, ...request.Option) error + + DescribeIamInstanceProfileAssociations(*ec2.DescribeIamInstanceProfileAssociationsInput) (*ec2.DescribeIamInstanceProfileAssociationsOutput, error) + DescribeIamInstanceProfileAssociationsWithContext(aws.Context, *ec2.DescribeIamInstanceProfileAssociationsInput, ...request.Option) (*ec2.DescribeIamInstanceProfileAssociationsOutput, error) + DescribeIamInstanceProfileAssociationsRequest(*ec2.DescribeIamInstanceProfileAssociationsInput) (*request.Request, *ec2.DescribeIamInstanceProfileAssociationsOutput) + + DescribeIamInstanceProfileAssociationsPages(*ec2.DescribeIamInstanceProfileAssociationsInput, func(*ec2.DescribeIamInstanceProfileAssociationsOutput, bool) bool) error + DescribeIamInstanceProfileAssociationsPagesWithContext(aws.Context, *ec2.DescribeIamInstanceProfileAssociationsInput, func(*ec2.DescribeIamInstanceProfileAssociationsOutput, bool) bool, ...request.Option) error + + DescribeIdFormat(*ec2.DescribeIdFormatInput) (*ec2.DescribeIdFormatOutput, error) + DescribeIdFormatWithContext(aws.Context, *ec2.DescribeIdFormatInput, ...request.Option) (*ec2.DescribeIdFormatOutput, error) + DescribeIdFormatRequest(*ec2.DescribeIdFormatInput) (*request.Request, *ec2.DescribeIdFormatOutput) + + DescribeIdentityIdFormat(*ec2.DescribeIdentityIdFormatInput) (*ec2.DescribeIdentityIdFormatOutput, error) + DescribeIdentityIdFormatWithContext(aws.Context, *ec2.DescribeIdentityIdFormatInput, ...request.Option) (*ec2.DescribeIdentityIdFormatOutput, error) + DescribeIdentityIdFormatRequest(*ec2.DescribeIdentityIdFormatInput) (*request.Request, *ec2.DescribeIdentityIdFormatOutput) + + DescribeImageAttribute(*ec2.DescribeImageAttributeInput) (*ec2.DescribeImageAttributeOutput, error) + DescribeImageAttributeWithContext(aws.Context, *ec2.DescribeImageAttributeInput, ...request.Option) (*ec2.DescribeImageAttributeOutput, error) + DescribeImageAttributeRequest(*ec2.DescribeImageAttributeInput) (*request.Request, *ec2.DescribeImageAttributeOutput) + + DescribeImages(*ec2.DescribeImagesInput) (*ec2.DescribeImagesOutput, error) + DescribeImagesWithContext(aws.Context, *ec2.DescribeImagesInput, ...request.Option) (*ec2.DescribeImagesOutput, error) + DescribeImagesRequest(*ec2.DescribeImagesInput) (*request.Request, *ec2.DescribeImagesOutput) + + DescribeImagesPages(*ec2.DescribeImagesInput, func(*ec2.DescribeImagesOutput, bool) bool) error + DescribeImagesPagesWithContext(aws.Context, *ec2.DescribeImagesInput, func(*ec2.DescribeImagesOutput, bool) bool, ...request.Option) error + + DescribeImportImageTasks(*ec2.DescribeImportImageTasksInput) (*ec2.DescribeImportImageTasksOutput, error) + DescribeImportImageTasksWithContext(aws.Context, *ec2.DescribeImportImageTasksInput, ...request.Option) (*ec2.DescribeImportImageTasksOutput, error) + DescribeImportImageTasksRequest(*ec2.DescribeImportImageTasksInput) (*request.Request, *ec2.DescribeImportImageTasksOutput) + + DescribeImportImageTasksPages(*ec2.DescribeImportImageTasksInput, func(*ec2.DescribeImportImageTasksOutput, bool) bool) error + DescribeImportImageTasksPagesWithContext(aws.Context, *ec2.DescribeImportImageTasksInput, func(*ec2.DescribeImportImageTasksOutput, bool) bool, ...request.Option) error + + DescribeImportSnapshotTasks(*ec2.DescribeImportSnapshotTasksInput) (*ec2.DescribeImportSnapshotTasksOutput, error) + 
DescribeImportSnapshotTasksWithContext(aws.Context, *ec2.DescribeImportSnapshotTasksInput, ...request.Option) (*ec2.DescribeImportSnapshotTasksOutput, error) + DescribeImportSnapshotTasksRequest(*ec2.DescribeImportSnapshotTasksInput) (*request.Request, *ec2.DescribeImportSnapshotTasksOutput) + + DescribeImportSnapshotTasksPages(*ec2.DescribeImportSnapshotTasksInput, func(*ec2.DescribeImportSnapshotTasksOutput, bool) bool) error + DescribeImportSnapshotTasksPagesWithContext(aws.Context, *ec2.DescribeImportSnapshotTasksInput, func(*ec2.DescribeImportSnapshotTasksOutput, bool) bool, ...request.Option) error + + DescribeInstanceAttribute(*ec2.DescribeInstanceAttributeInput) (*ec2.DescribeInstanceAttributeOutput, error) + DescribeInstanceAttributeWithContext(aws.Context, *ec2.DescribeInstanceAttributeInput, ...request.Option) (*ec2.DescribeInstanceAttributeOutput, error) + DescribeInstanceAttributeRequest(*ec2.DescribeInstanceAttributeInput) (*request.Request, *ec2.DescribeInstanceAttributeOutput) + + DescribeInstanceConnectEndpoints(*ec2.DescribeInstanceConnectEndpointsInput) (*ec2.DescribeInstanceConnectEndpointsOutput, error) + DescribeInstanceConnectEndpointsWithContext(aws.Context, *ec2.DescribeInstanceConnectEndpointsInput, ...request.Option) (*ec2.DescribeInstanceConnectEndpointsOutput, error) + DescribeInstanceConnectEndpointsRequest(*ec2.DescribeInstanceConnectEndpointsInput) (*request.Request, *ec2.DescribeInstanceConnectEndpointsOutput) + + DescribeInstanceConnectEndpointsPages(*ec2.DescribeInstanceConnectEndpointsInput, func(*ec2.DescribeInstanceConnectEndpointsOutput, bool) bool) error + DescribeInstanceConnectEndpointsPagesWithContext(aws.Context, *ec2.DescribeInstanceConnectEndpointsInput, func(*ec2.DescribeInstanceConnectEndpointsOutput, bool) bool, ...request.Option) error + + DescribeInstanceCreditSpecifications(*ec2.DescribeInstanceCreditSpecificationsInput) (*ec2.DescribeInstanceCreditSpecificationsOutput, error) + DescribeInstanceCreditSpecificationsWithContext(aws.Context, *ec2.DescribeInstanceCreditSpecificationsInput, ...request.Option) (*ec2.DescribeInstanceCreditSpecificationsOutput, error) + DescribeInstanceCreditSpecificationsRequest(*ec2.DescribeInstanceCreditSpecificationsInput) (*request.Request, *ec2.DescribeInstanceCreditSpecificationsOutput) + + DescribeInstanceCreditSpecificationsPages(*ec2.DescribeInstanceCreditSpecificationsInput, func(*ec2.DescribeInstanceCreditSpecificationsOutput, bool) bool) error + DescribeInstanceCreditSpecificationsPagesWithContext(aws.Context, *ec2.DescribeInstanceCreditSpecificationsInput, func(*ec2.DescribeInstanceCreditSpecificationsOutput, bool) bool, ...request.Option) error + + DescribeInstanceEventNotificationAttributes(*ec2.DescribeInstanceEventNotificationAttributesInput) (*ec2.DescribeInstanceEventNotificationAttributesOutput, error) + DescribeInstanceEventNotificationAttributesWithContext(aws.Context, *ec2.DescribeInstanceEventNotificationAttributesInput, ...request.Option) (*ec2.DescribeInstanceEventNotificationAttributesOutput, error) + DescribeInstanceEventNotificationAttributesRequest(*ec2.DescribeInstanceEventNotificationAttributesInput) (*request.Request, *ec2.DescribeInstanceEventNotificationAttributesOutput) + + DescribeInstanceEventWindows(*ec2.DescribeInstanceEventWindowsInput) (*ec2.DescribeInstanceEventWindowsOutput, error) + DescribeInstanceEventWindowsWithContext(aws.Context, *ec2.DescribeInstanceEventWindowsInput, ...request.Option) (*ec2.DescribeInstanceEventWindowsOutput, error) + 
DescribeInstanceEventWindowsRequest(*ec2.DescribeInstanceEventWindowsInput) (*request.Request, *ec2.DescribeInstanceEventWindowsOutput) + + DescribeInstanceEventWindowsPages(*ec2.DescribeInstanceEventWindowsInput, func(*ec2.DescribeInstanceEventWindowsOutput, bool) bool) error + DescribeInstanceEventWindowsPagesWithContext(aws.Context, *ec2.DescribeInstanceEventWindowsInput, func(*ec2.DescribeInstanceEventWindowsOutput, bool) bool, ...request.Option) error + + DescribeInstanceStatus(*ec2.DescribeInstanceStatusInput) (*ec2.DescribeInstanceStatusOutput, error) + DescribeInstanceStatusWithContext(aws.Context, *ec2.DescribeInstanceStatusInput, ...request.Option) (*ec2.DescribeInstanceStatusOutput, error) + DescribeInstanceStatusRequest(*ec2.DescribeInstanceStatusInput) (*request.Request, *ec2.DescribeInstanceStatusOutput) + + DescribeInstanceStatusPages(*ec2.DescribeInstanceStatusInput, func(*ec2.DescribeInstanceStatusOutput, bool) bool) error + DescribeInstanceStatusPagesWithContext(aws.Context, *ec2.DescribeInstanceStatusInput, func(*ec2.DescribeInstanceStatusOutput, bool) bool, ...request.Option) error + + DescribeInstanceTopology(*ec2.DescribeInstanceTopologyInput) (*ec2.DescribeInstanceTopologyOutput, error) + DescribeInstanceTopologyWithContext(aws.Context, *ec2.DescribeInstanceTopologyInput, ...request.Option) (*ec2.DescribeInstanceTopologyOutput, error) + DescribeInstanceTopologyRequest(*ec2.DescribeInstanceTopologyInput) (*request.Request, *ec2.DescribeInstanceTopologyOutput) + + DescribeInstanceTopologyPages(*ec2.DescribeInstanceTopologyInput, func(*ec2.DescribeInstanceTopologyOutput, bool) bool) error + DescribeInstanceTopologyPagesWithContext(aws.Context, *ec2.DescribeInstanceTopologyInput, func(*ec2.DescribeInstanceTopologyOutput, bool) bool, ...request.Option) error + + DescribeInstanceTypeOfferings(*ec2.DescribeInstanceTypeOfferingsInput) (*ec2.DescribeInstanceTypeOfferingsOutput, error) + DescribeInstanceTypeOfferingsWithContext(aws.Context, *ec2.DescribeInstanceTypeOfferingsInput, ...request.Option) (*ec2.DescribeInstanceTypeOfferingsOutput, error) + DescribeInstanceTypeOfferingsRequest(*ec2.DescribeInstanceTypeOfferingsInput) (*request.Request, *ec2.DescribeInstanceTypeOfferingsOutput) + + DescribeInstanceTypeOfferingsPages(*ec2.DescribeInstanceTypeOfferingsInput, func(*ec2.DescribeInstanceTypeOfferingsOutput, bool) bool) error + DescribeInstanceTypeOfferingsPagesWithContext(aws.Context, *ec2.DescribeInstanceTypeOfferingsInput, func(*ec2.DescribeInstanceTypeOfferingsOutput, bool) bool, ...request.Option) error + + DescribeInstanceTypes(*ec2.DescribeInstanceTypesInput) (*ec2.DescribeInstanceTypesOutput, error) + DescribeInstanceTypesWithContext(aws.Context, *ec2.DescribeInstanceTypesInput, ...request.Option) (*ec2.DescribeInstanceTypesOutput, error) + DescribeInstanceTypesRequest(*ec2.DescribeInstanceTypesInput) (*request.Request, *ec2.DescribeInstanceTypesOutput) + + DescribeInstanceTypesPages(*ec2.DescribeInstanceTypesInput, func(*ec2.DescribeInstanceTypesOutput, bool) bool) error + DescribeInstanceTypesPagesWithContext(aws.Context, *ec2.DescribeInstanceTypesInput, func(*ec2.DescribeInstanceTypesOutput, bool) bool, ...request.Option) error + + DescribeInstances(*ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error) + DescribeInstancesWithContext(aws.Context, *ec2.DescribeInstancesInput, ...request.Option) (*ec2.DescribeInstancesOutput, error) + DescribeInstancesRequest(*ec2.DescribeInstancesInput) (*request.Request, *ec2.DescribeInstancesOutput) + + 
DescribeInstancesPages(*ec2.DescribeInstancesInput, func(*ec2.DescribeInstancesOutput, bool) bool) error + DescribeInstancesPagesWithContext(aws.Context, *ec2.DescribeInstancesInput, func(*ec2.DescribeInstancesOutput, bool) bool, ...request.Option) error + + DescribeInternetGateways(*ec2.DescribeInternetGatewaysInput) (*ec2.DescribeInternetGatewaysOutput, error) + DescribeInternetGatewaysWithContext(aws.Context, *ec2.DescribeInternetGatewaysInput, ...request.Option) (*ec2.DescribeInternetGatewaysOutput, error) + DescribeInternetGatewaysRequest(*ec2.DescribeInternetGatewaysInput) (*request.Request, *ec2.DescribeInternetGatewaysOutput) + + DescribeInternetGatewaysPages(*ec2.DescribeInternetGatewaysInput, func(*ec2.DescribeInternetGatewaysOutput, bool) bool) error + DescribeInternetGatewaysPagesWithContext(aws.Context, *ec2.DescribeInternetGatewaysInput, func(*ec2.DescribeInternetGatewaysOutput, bool) bool, ...request.Option) error + + DescribeIpamByoasn(*ec2.DescribeIpamByoasnInput) (*ec2.DescribeIpamByoasnOutput, error) + DescribeIpamByoasnWithContext(aws.Context, *ec2.DescribeIpamByoasnInput, ...request.Option) (*ec2.DescribeIpamByoasnOutput, error) + DescribeIpamByoasnRequest(*ec2.DescribeIpamByoasnInput) (*request.Request, *ec2.DescribeIpamByoasnOutput) + + DescribeIpamExternalResourceVerificationTokens(*ec2.DescribeIpamExternalResourceVerificationTokensInput) (*ec2.DescribeIpamExternalResourceVerificationTokensOutput, error) + DescribeIpamExternalResourceVerificationTokensWithContext(aws.Context, *ec2.DescribeIpamExternalResourceVerificationTokensInput, ...request.Option) (*ec2.DescribeIpamExternalResourceVerificationTokensOutput, error) + DescribeIpamExternalResourceVerificationTokensRequest(*ec2.DescribeIpamExternalResourceVerificationTokensInput) (*request.Request, *ec2.DescribeIpamExternalResourceVerificationTokensOutput) + + DescribeIpamPools(*ec2.DescribeIpamPoolsInput) (*ec2.DescribeIpamPoolsOutput, error) + DescribeIpamPoolsWithContext(aws.Context, *ec2.DescribeIpamPoolsInput, ...request.Option) (*ec2.DescribeIpamPoolsOutput, error) + DescribeIpamPoolsRequest(*ec2.DescribeIpamPoolsInput) (*request.Request, *ec2.DescribeIpamPoolsOutput) + + DescribeIpamPoolsPages(*ec2.DescribeIpamPoolsInput, func(*ec2.DescribeIpamPoolsOutput, bool) bool) error + DescribeIpamPoolsPagesWithContext(aws.Context, *ec2.DescribeIpamPoolsInput, func(*ec2.DescribeIpamPoolsOutput, bool) bool, ...request.Option) error + + DescribeIpamResourceDiscoveries(*ec2.DescribeIpamResourceDiscoveriesInput) (*ec2.DescribeIpamResourceDiscoveriesOutput, error) + DescribeIpamResourceDiscoveriesWithContext(aws.Context, *ec2.DescribeIpamResourceDiscoveriesInput, ...request.Option) (*ec2.DescribeIpamResourceDiscoveriesOutput, error) + DescribeIpamResourceDiscoveriesRequest(*ec2.DescribeIpamResourceDiscoveriesInput) (*request.Request, *ec2.DescribeIpamResourceDiscoveriesOutput) + + DescribeIpamResourceDiscoveriesPages(*ec2.DescribeIpamResourceDiscoveriesInput, func(*ec2.DescribeIpamResourceDiscoveriesOutput, bool) bool) error + DescribeIpamResourceDiscoveriesPagesWithContext(aws.Context, *ec2.DescribeIpamResourceDiscoveriesInput, func(*ec2.DescribeIpamResourceDiscoveriesOutput, bool) bool, ...request.Option) error + + DescribeIpamResourceDiscoveryAssociations(*ec2.DescribeIpamResourceDiscoveryAssociationsInput) (*ec2.DescribeIpamResourceDiscoveryAssociationsOutput, error) + DescribeIpamResourceDiscoveryAssociationsWithContext(aws.Context, *ec2.DescribeIpamResourceDiscoveryAssociationsInput, ...request.Option) 
(*ec2.DescribeIpamResourceDiscoveryAssociationsOutput, error) + DescribeIpamResourceDiscoveryAssociationsRequest(*ec2.DescribeIpamResourceDiscoveryAssociationsInput) (*request.Request, *ec2.DescribeIpamResourceDiscoveryAssociationsOutput) + + DescribeIpamResourceDiscoveryAssociationsPages(*ec2.DescribeIpamResourceDiscoveryAssociationsInput, func(*ec2.DescribeIpamResourceDiscoveryAssociationsOutput, bool) bool) error + DescribeIpamResourceDiscoveryAssociationsPagesWithContext(aws.Context, *ec2.DescribeIpamResourceDiscoveryAssociationsInput, func(*ec2.DescribeIpamResourceDiscoveryAssociationsOutput, bool) bool, ...request.Option) error + + DescribeIpamScopes(*ec2.DescribeIpamScopesInput) (*ec2.DescribeIpamScopesOutput, error) + DescribeIpamScopesWithContext(aws.Context, *ec2.DescribeIpamScopesInput, ...request.Option) (*ec2.DescribeIpamScopesOutput, error) + DescribeIpamScopesRequest(*ec2.DescribeIpamScopesInput) (*request.Request, *ec2.DescribeIpamScopesOutput) + + DescribeIpamScopesPages(*ec2.DescribeIpamScopesInput, func(*ec2.DescribeIpamScopesOutput, bool) bool) error + DescribeIpamScopesPagesWithContext(aws.Context, *ec2.DescribeIpamScopesInput, func(*ec2.DescribeIpamScopesOutput, bool) bool, ...request.Option) error + + DescribeIpams(*ec2.DescribeIpamsInput) (*ec2.DescribeIpamsOutput, error) + DescribeIpamsWithContext(aws.Context, *ec2.DescribeIpamsInput, ...request.Option) (*ec2.DescribeIpamsOutput, error) + DescribeIpamsRequest(*ec2.DescribeIpamsInput) (*request.Request, *ec2.DescribeIpamsOutput) + + DescribeIpamsPages(*ec2.DescribeIpamsInput, func(*ec2.DescribeIpamsOutput, bool) bool) error + DescribeIpamsPagesWithContext(aws.Context, *ec2.DescribeIpamsInput, func(*ec2.DescribeIpamsOutput, bool) bool, ...request.Option) error + + DescribeIpv6Pools(*ec2.DescribeIpv6PoolsInput) (*ec2.DescribeIpv6PoolsOutput, error) + DescribeIpv6PoolsWithContext(aws.Context, *ec2.DescribeIpv6PoolsInput, ...request.Option) (*ec2.DescribeIpv6PoolsOutput, error) + DescribeIpv6PoolsRequest(*ec2.DescribeIpv6PoolsInput) (*request.Request, *ec2.DescribeIpv6PoolsOutput) + + DescribeIpv6PoolsPages(*ec2.DescribeIpv6PoolsInput, func(*ec2.DescribeIpv6PoolsOutput, bool) bool) error + DescribeIpv6PoolsPagesWithContext(aws.Context, *ec2.DescribeIpv6PoolsInput, func(*ec2.DescribeIpv6PoolsOutput, bool) bool, ...request.Option) error + + DescribeKeyPairs(*ec2.DescribeKeyPairsInput) (*ec2.DescribeKeyPairsOutput, error) + DescribeKeyPairsWithContext(aws.Context, *ec2.DescribeKeyPairsInput, ...request.Option) (*ec2.DescribeKeyPairsOutput, error) + DescribeKeyPairsRequest(*ec2.DescribeKeyPairsInput) (*request.Request, *ec2.DescribeKeyPairsOutput) + + DescribeLaunchTemplateVersions(*ec2.DescribeLaunchTemplateVersionsInput) (*ec2.DescribeLaunchTemplateVersionsOutput, error) + DescribeLaunchTemplateVersionsWithContext(aws.Context, *ec2.DescribeLaunchTemplateVersionsInput, ...request.Option) (*ec2.DescribeLaunchTemplateVersionsOutput, error) + DescribeLaunchTemplateVersionsRequest(*ec2.DescribeLaunchTemplateVersionsInput) (*request.Request, *ec2.DescribeLaunchTemplateVersionsOutput) + + DescribeLaunchTemplateVersionsPages(*ec2.DescribeLaunchTemplateVersionsInput, func(*ec2.DescribeLaunchTemplateVersionsOutput, bool) bool) error + DescribeLaunchTemplateVersionsPagesWithContext(aws.Context, *ec2.DescribeLaunchTemplateVersionsInput, func(*ec2.DescribeLaunchTemplateVersionsOutput, bool) bool, ...request.Option) error + + DescribeLaunchTemplates(*ec2.DescribeLaunchTemplatesInput) (*ec2.DescribeLaunchTemplatesOutput, error) + 
DescribeLaunchTemplatesWithContext(aws.Context, *ec2.DescribeLaunchTemplatesInput, ...request.Option) (*ec2.DescribeLaunchTemplatesOutput, error) + DescribeLaunchTemplatesRequest(*ec2.DescribeLaunchTemplatesInput) (*request.Request, *ec2.DescribeLaunchTemplatesOutput) + + DescribeLaunchTemplatesPages(*ec2.DescribeLaunchTemplatesInput, func(*ec2.DescribeLaunchTemplatesOutput, bool) bool) error + DescribeLaunchTemplatesPagesWithContext(aws.Context, *ec2.DescribeLaunchTemplatesInput, func(*ec2.DescribeLaunchTemplatesOutput, bool) bool, ...request.Option) error + + DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations(*ec2.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput) (*ec2.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput, error) + DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsWithContext(aws.Context, *ec2.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput, ...request.Option) (*ec2.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput, error) + DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsRequest(*ec2.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput) (*request.Request, *ec2.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput) + + DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsPages(*ec2.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput, func(*ec2.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput, bool) bool) error + DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsPagesWithContext(aws.Context, *ec2.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput, func(*ec2.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput, bool) bool, ...request.Option) error + + DescribeLocalGatewayRouteTableVpcAssociations(*ec2.DescribeLocalGatewayRouteTableVpcAssociationsInput) (*ec2.DescribeLocalGatewayRouteTableVpcAssociationsOutput, error) + DescribeLocalGatewayRouteTableVpcAssociationsWithContext(aws.Context, *ec2.DescribeLocalGatewayRouteTableVpcAssociationsInput, ...request.Option) (*ec2.DescribeLocalGatewayRouteTableVpcAssociationsOutput, error) + DescribeLocalGatewayRouteTableVpcAssociationsRequest(*ec2.DescribeLocalGatewayRouteTableVpcAssociationsInput) (*request.Request, *ec2.DescribeLocalGatewayRouteTableVpcAssociationsOutput) + + DescribeLocalGatewayRouteTableVpcAssociationsPages(*ec2.DescribeLocalGatewayRouteTableVpcAssociationsInput, func(*ec2.DescribeLocalGatewayRouteTableVpcAssociationsOutput, bool) bool) error + DescribeLocalGatewayRouteTableVpcAssociationsPagesWithContext(aws.Context, *ec2.DescribeLocalGatewayRouteTableVpcAssociationsInput, func(*ec2.DescribeLocalGatewayRouteTableVpcAssociationsOutput, bool) bool, ...request.Option) error + + DescribeLocalGatewayRouteTables(*ec2.DescribeLocalGatewayRouteTablesInput) (*ec2.DescribeLocalGatewayRouteTablesOutput, error) + DescribeLocalGatewayRouteTablesWithContext(aws.Context, *ec2.DescribeLocalGatewayRouteTablesInput, ...request.Option) (*ec2.DescribeLocalGatewayRouteTablesOutput, error) + DescribeLocalGatewayRouteTablesRequest(*ec2.DescribeLocalGatewayRouteTablesInput) (*request.Request, *ec2.DescribeLocalGatewayRouteTablesOutput) + + DescribeLocalGatewayRouteTablesPages(*ec2.DescribeLocalGatewayRouteTablesInput, func(*ec2.DescribeLocalGatewayRouteTablesOutput, bool) bool) error + DescribeLocalGatewayRouteTablesPagesWithContext(aws.Context, *ec2.DescribeLocalGatewayRouteTablesInput, 
func(*ec2.DescribeLocalGatewayRouteTablesOutput, bool) bool, ...request.Option) error + + DescribeLocalGatewayVirtualInterfaceGroups(*ec2.DescribeLocalGatewayVirtualInterfaceGroupsInput) (*ec2.DescribeLocalGatewayVirtualInterfaceGroupsOutput, error) + DescribeLocalGatewayVirtualInterfaceGroupsWithContext(aws.Context, *ec2.DescribeLocalGatewayVirtualInterfaceGroupsInput, ...request.Option) (*ec2.DescribeLocalGatewayVirtualInterfaceGroupsOutput, error) + DescribeLocalGatewayVirtualInterfaceGroupsRequest(*ec2.DescribeLocalGatewayVirtualInterfaceGroupsInput) (*request.Request, *ec2.DescribeLocalGatewayVirtualInterfaceGroupsOutput) + + DescribeLocalGatewayVirtualInterfaceGroupsPages(*ec2.DescribeLocalGatewayVirtualInterfaceGroupsInput, func(*ec2.DescribeLocalGatewayVirtualInterfaceGroupsOutput, bool) bool) error + DescribeLocalGatewayVirtualInterfaceGroupsPagesWithContext(aws.Context, *ec2.DescribeLocalGatewayVirtualInterfaceGroupsInput, func(*ec2.DescribeLocalGatewayVirtualInterfaceGroupsOutput, bool) bool, ...request.Option) error + + DescribeLocalGatewayVirtualInterfaces(*ec2.DescribeLocalGatewayVirtualInterfacesInput) (*ec2.DescribeLocalGatewayVirtualInterfacesOutput, error) + DescribeLocalGatewayVirtualInterfacesWithContext(aws.Context, *ec2.DescribeLocalGatewayVirtualInterfacesInput, ...request.Option) (*ec2.DescribeLocalGatewayVirtualInterfacesOutput, error) + DescribeLocalGatewayVirtualInterfacesRequest(*ec2.DescribeLocalGatewayVirtualInterfacesInput) (*request.Request, *ec2.DescribeLocalGatewayVirtualInterfacesOutput) + + DescribeLocalGatewayVirtualInterfacesPages(*ec2.DescribeLocalGatewayVirtualInterfacesInput, func(*ec2.DescribeLocalGatewayVirtualInterfacesOutput, bool) bool) error + DescribeLocalGatewayVirtualInterfacesPagesWithContext(aws.Context, *ec2.DescribeLocalGatewayVirtualInterfacesInput, func(*ec2.DescribeLocalGatewayVirtualInterfacesOutput, bool) bool, ...request.Option) error + + DescribeLocalGateways(*ec2.DescribeLocalGatewaysInput) (*ec2.DescribeLocalGatewaysOutput, error) + DescribeLocalGatewaysWithContext(aws.Context, *ec2.DescribeLocalGatewaysInput, ...request.Option) (*ec2.DescribeLocalGatewaysOutput, error) + DescribeLocalGatewaysRequest(*ec2.DescribeLocalGatewaysInput) (*request.Request, *ec2.DescribeLocalGatewaysOutput) + + DescribeLocalGatewaysPages(*ec2.DescribeLocalGatewaysInput, func(*ec2.DescribeLocalGatewaysOutput, bool) bool) error + DescribeLocalGatewaysPagesWithContext(aws.Context, *ec2.DescribeLocalGatewaysInput, func(*ec2.DescribeLocalGatewaysOutput, bool) bool, ...request.Option) error + + DescribeLockedSnapshots(*ec2.DescribeLockedSnapshotsInput) (*ec2.DescribeLockedSnapshotsOutput, error) + DescribeLockedSnapshotsWithContext(aws.Context, *ec2.DescribeLockedSnapshotsInput, ...request.Option) (*ec2.DescribeLockedSnapshotsOutput, error) + DescribeLockedSnapshotsRequest(*ec2.DescribeLockedSnapshotsInput) (*request.Request, *ec2.DescribeLockedSnapshotsOutput) + + DescribeMacHosts(*ec2.DescribeMacHostsInput) (*ec2.DescribeMacHostsOutput, error) + DescribeMacHostsWithContext(aws.Context, *ec2.DescribeMacHostsInput, ...request.Option) (*ec2.DescribeMacHostsOutput, error) + DescribeMacHostsRequest(*ec2.DescribeMacHostsInput) (*request.Request, *ec2.DescribeMacHostsOutput) + + DescribeMacHostsPages(*ec2.DescribeMacHostsInput, func(*ec2.DescribeMacHostsOutput, bool) bool) error + DescribeMacHostsPagesWithContext(aws.Context, *ec2.DescribeMacHostsInput, func(*ec2.DescribeMacHostsOutput, bool) bool, ...request.Option) error + + 
DescribeManagedPrefixLists(*ec2.DescribeManagedPrefixListsInput) (*ec2.DescribeManagedPrefixListsOutput, error) + DescribeManagedPrefixListsWithContext(aws.Context, *ec2.DescribeManagedPrefixListsInput, ...request.Option) (*ec2.DescribeManagedPrefixListsOutput, error) + DescribeManagedPrefixListsRequest(*ec2.DescribeManagedPrefixListsInput) (*request.Request, *ec2.DescribeManagedPrefixListsOutput) + + DescribeManagedPrefixListsPages(*ec2.DescribeManagedPrefixListsInput, func(*ec2.DescribeManagedPrefixListsOutput, bool) bool) error + DescribeManagedPrefixListsPagesWithContext(aws.Context, *ec2.DescribeManagedPrefixListsInput, func(*ec2.DescribeManagedPrefixListsOutput, bool) bool, ...request.Option) error + + DescribeMovingAddresses(*ec2.DescribeMovingAddressesInput) (*ec2.DescribeMovingAddressesOutput, error) + DescribeMovingAddressesWithContext(aws.Context, *ec2.DescribeMovingAddressesInput, ...request.Option) (*ec2.DescribeMovingAddressesOutput, error) + DescribeMovingAddressesRequest(*ec2.DescribeMovingAddressesInput) (*request.Request, *ec2.DescribeMovingAddressesOutput) + + DescribeMovingAddressesPages(*ec2.DescribeMovingAddressesInput, func(*ec2.DescribeMovingAddressesOutput, bool) bool) error + DescribeMovingAddressesPagesWithContext(aws.Context, *ec2.DescribeMovingAddressesInput, func(*ec2.DescribeMovingAddressesOutput, bool) bool, ...request.Option) error + + DescribeNatGateways(*ec2.DescribeNatGatewaysInput) (*ec2.DescribeNatGatewaysOutput, error) + DescribeNatGatewaysWithContext(aws.Context, *ec2.DescribeNatGatewaysInput, ...request.Option) (*ec2.DescribeNatGatewaysOutput, error) + DescribeNatGatewaysRequest(*ec2.DescribeNatGatewaysInput) (*request.Request, *ec2.DescribeNatGatewaysOutput) + + DescribeNatGatewaysPages(*ec2.DescribeNatGatewaysInput, func(*ec2.DescribeNatGatewaysOutput, bool) bool) error + DescribeNatGatewaysPagesWithContext(aws.Context, *ec2.DescribeNatGatewaysInput, func(*ec2.DescribeNatGatewaysOutput, bool) bool, ...request.Option) error + + DescribeNetworkAcls(*ec2.DescribeNetworkAclsInput) (*ec2.DescribeNetworkAclsOutput, error) + DescribeNetworkAclsWithContext(aws.Context, *ec2.DescribeNetworkAclsInput, ...request.Option) (*ec2.DescribeNetworkAclsOutput, error) + DescribeNetworkAclsRequest(*ec2.DescribeNetworkAclsInput) (*request.Request, *ec2.DescribeNetworkAclsOutput) + + DescribeNetworkAclsPages(*ec2.DescribeNetworkAclsInput, func(*ec2.DescribeNetworkAclsOutput, bool) bool) error + DescribeNetworkAclsPagesWithContext(aws.Context, *ec2.DescribeNetworkAclsInput, func(*ec2.DescribeNetworkAclsOutput, bool) bool, ...request.Option) error + + DescribeNetworkInsightsAccessScopeAnalyses(*ec2.DescribeNetworkInsightsAccessScopeAnalysesInput) (*ec2.DescribeNetworkInsightsAccessScopeAnalysesOutput, error) + DescribeNetworkInsightsAccessScopeAnalysesWithContext(aws.Context, *ec2.DescribeNetworkInsightsAccessScopeAnalysesInput, ...request.Option) (*ec2.DescribeNetworkInsightsAccessScopeAnalysesOutput, error) + DescribeNetworkInsightsAccessScopeAnalysesRequest(*ec2.DescribeNetworkInsightsAccessScopeAnalysesInput) (*request.Request, *ec2.DescribeNetworkInsightsAccessScopeAnalysesOutput) + + DescribeNetworkInsightsAccessScopeAnalysesPages(*ec2.DescribeNetworkInsightsAccessScopeAnalysesInput, func(*ec2.DescribeNetworkInsightsAccessScopeAnalysesOutput, bool) bool) error + DescribeNetworkInsightsAccessScopeAnalysesPagesWithContext(aws.Context, *ec2.DescribeNetworkInsightsAccessScopeAnalysesInput, func(*ec2.DescribeNetworkInsightsAccessScopeAnalysesOutput, bool) bool, 
...request.Option) error + + DescribeNetworkInsightsAccessScopes(*ec2.DescribeNetworkInsightsAccessScopesInput) (*ec2.DescribeNetworkInsightsAccessScopesOutput, error) + DescribeNetworkInsightsAccessScopesWithContext(aws.Context, *ec2.DescribeNetworkInsightsAccessScopesInput, ...request.Option) (*ec2.DescribeNetworkInsightsAccessScopesOutput, error) + DescribeNetworkInsightsAccessScopesRequest(*ec2.DescribeNetworkInsightsAccessScopesInput) (*request.Request, *ec2.DescribeNetworkInsightsAccessScopesOutput) + + DescribeNetworkInsightsAccessScopesPages(*ec2.DescribeNetworkInsightsAccessScopesInput, func(*ec2.DescribeNetworkInsightsAccessScopesOutput, bool) bool) error + DescribeNetworkInsightsAccessScopesPagesWithContext(aws.Context, *ec2.DescribeNetworkInsightsAccessScopesInput, func(*ec2.DescribeNetworkInsightsAccessScopesOutput, bool) bool, ...request.Option) error + + DescribeNetworkInsightsAnalyses(*ec2.DescribeNetworkInsightsAnalysesInput) (*ec2.DescribeNetworkInsightsAnalysesOutput, error) + DescribeNetworkInsightsAnalysesWithContext(aws.Context, *ec2.DescribeNetworkInsightsAnalysesInput, ...request.Option) (*ec2.DescribeNetworkInsightsAnalysesOutput, error) + DescribeNetworkInsightsAnalysesRequest(*ec2.DescribeNetworkInsightsAnalysesInput) (*request.Request, *ec2.DescribeNetworkInsightsAnalysesOutput) + + DescribeNetworkInsightsAnalysesPages(*ec2.DescribeNetworkInsightsAnalysesInput, func(*ec2.DescribeNetworkInsightsAnalysesOutput, bool) bool) error + DescribeNetworkInsightsAnalysesPagesWithContext(aws.Context, *ec2.DescribeNetworkInsightsAnalysesInput, func(*ec2.DescribeNetworkInsightsAnalysesOutput, bool) bool, ...request.Option) error + + DescribeNetworkInsightsPaths(*ec2.DescribeNetworkInsightsPathsInput) (*ec2.DescribeNetworkInsightsPathsOutput, error) + DescribeNetworkInsightsPathsWithContext(aws.Context, *ec2.DescribeNetworkInsightsPathsInput, ...request.Option) (*ec2.DescribeNetworkInsightsPathsOutput, error) + DescribeNetworkInsightsPathsRequest(*ec2.DescribeNetworkInsightsPathsInput) (*request.Request, *ec2.DescribeNetworkInsightsPathsOutput) + + DescribeNetworkInsightsPathsPages(*ec2.DescribeNetworkInsightsPathsInput, func(*ec2.DescribeNetworkInsightsPathsOutput, bool) bool) error + DescribeNetworkInsightsPathsPagesWithContext(aws.Context, *ec2.DescribeNetworkInsightsPathsInput, func(*ec2.DescribeNetworkInsightsPathsOutput, bool) bool, ...request.Option) error + + DescribeNetworkInterfaceAttribute(*ec2.DescribeNetworkInterfaceAttributeInput) (*ec2.DescribeNetworkInterfaceAttributeOutput, error) + DescribeNetworkInterfaceAttributeWithContext(aws.Context, *ec2.DescribeNetworkInterfaceAttributeInput, ...request.Option) (*ec2.DescribeNetworkInterfaceAttributeOutput, error) + DescribeNetworkInterfaceAttributeRequest(*ec2.DescribeNetworkInterfaceAttributeInput) (*request.Request, *ec2.DescribeNetworkInterfaceAttributeOutput) + + DescribeNetworkInterfacePermissions(*ec2.DescribeNetworkInterfacePermissionsInput) (*ec2.DescribeNetworkInterfacePermissionsOutput, error) + DescribeNetworkInterfacePermissionsWithContext(aws.Context, *ec2.DescribeNetworkInterfacePermissionsInput, ...request.Option) (*ec2.DescribeNetworkInterfacePermissionsOutput, error) + DescribeNetworkInterfacePermissionsRequest(*ec2.DescribeNetworkInterfacePermissionsInput) (*request.Request, *ec2.DescribeNetworkInterfacePermissionsOutput) + + DescribeNetworkInterfacePermissionsPages(*ec2.DescribeNetworkInterfacePermissionsInput, func(*ec2.DescribeNetworkInterfacePermissionsOutput, bool) bool) error + 
DescribeNetworkInterfacePermissionsPagesWithContext(aws.Context, *ec2.DescribeNetworkInterfacePermissionsInput, func(*ec2.DescribeNetworkInterfacePermissionsOutput, bool) bool, ...request.Option) error + + DescribeNetworkInterfaces(*ec2.DescribeNetworkInterfacesInput) (*ec2.DescribeNetworkInterfacesOutput, error) + DescribeNetworkInterfacesWithContext(aws.Context, *ec2.DescribeNetworkInterfacesInput, ...request.Option) (*ec2.DescribeNetworkInterfacesOutput, error) + DescribeNetworkInterfacesRequest(*ec2.DescribeNetworkInterfacesInput) (*request.Request, *ec2.DescribeNetworkInterfacesOutput) + + DescribeNetworkInterfacesPages(*ec2.DescribeNetworkInterfacesInput, func(*ec2.DescribeNetworkInterfacesOutput, bool) bool) error + DescribeNetworkInterfacesPagesWithContext(aws.Context, *ec2.DescribeNetworkInterfacesInput, func(*ec2.DescribeNetworkInterfacesOutput, bool) bool, ...request.Option) error + + DescribePlacementGroups(*ec2.DescribePlacementGroupsInput) (*ec2.DescribePlacementGroupsOutput, error) + DescribePlacementGroupsWithContext(aws.Context, *ec2.DescribePlacementGroupsInput, ...request.Option) (*ec2.DescribePlacementGroupsOutput, error) + DescribePlacementGroupsRequest(*ec2.DescribePlacementGroupsInput) (*request.Request, *ec2.DescribePlacementGroupsOutput) + + DescribePrefixLists(*ec2.DescribePrefixListsInput) (*ec2.DescribePrefixListsOutput, error) + DescribePrefixListsWithContext(aws.Context, *ec2.DescribePrefixListsInput, ...request.Option) (*ec2.DescribePrefixListsOutput, error) + DescribePrefixListsRequest(*ec2.DescribePrefixListsInput) (*request.Request, *ec2.DescribePrefixListsOutput) + + DescribePrefixListsPages(*ec2.DescribePrefixListsInput, func(*ec2.DescribePrefixListsOutput, bool) bool) error + DescribePrefixListsPagesWithContext(aws.Context, *ec2.DescribePrefixListsInput, func(*ec2.DescribePrefixListsOutput, bool) bool, ...request.Option) error + + DescribePrincipalIdFormat(*ec2.DescribePrincipalIdFormatInput) (*ec2.DescribePrincipalIdFormatOutput, error) + DescribePrincipalIdFormatWithContext(aws.Context, *ec2.DescribePrincipalIdFormatInput, ...request.Option) (*ec2.DescribePrincipalIdFormatOutput, error) + DescribePrincipalIdFormatRequest(*ec2.DescribePrincipalIdFormatInput) (*request.Request, *ec2.DescribePrincipalIdFormatOutput) + + DescribePrincipalIdFormatPages(*ec2.DescribePrincipalIdFormatInput, func(*ec2.DescribePrincipalIdFormatOutput, bool) bool) error + DescribePrincipalIdFormatPagesWithContext(aws.Context, *ec2.DescribePrincipalIdFormatInput, func(*ec2.DescribePrincipalIdFormatOutput, bool) bool, ...request.Option) error + + DescribePublicIpv4Pools(*ec2.DescribePublicIpv4PoolsInput) (*ec2.DescribePublicIpv4PoolsOutput, error) + DescribePublicIpv4PoolsWithContext(aws.Context, *ec2.DescribePublicIpv4PoolsInput, ...request.Option) (*ec2.DescribePublicIpv4PoolsOutput, error) + DescribePublicIpv4PoolsRequest(*ec2.DescribePublicIpv4PoolsInput) (*request.Request, *ec2.DescribePublicIpv4PoolsOutput) + + DescribePublicIpv4PoolsPages(*ec2.DescribePublicIpv4PoolsInput, func(*ec2.DescribePublicIpv4PoolsOutput, bool) bool) error + DescribePublicIpv4PoolsPagesWithContext(aws.Context, *ec2.DescribePublicIpv4PoolsInput, func(*ec2.DescribePublicIpv4PoolsOutput, bool) bool, ...request.Option) error + + DescribeRegions(*ec2.DescribeRegionsInput) (*ec2.DescribeRegionsOutput, error) + DescribeRegionsWithContext(aws.Context, *ec2.DescribeRegionsInput, ...request.Option) (*ec2.DescribeRegionsOutput, error) + DescribeRegionsRequest(*ec2.DescribeRegionsInput) (*request.Request, 
*ec2.DescribeRegionsOutput) + + DescribeReplaceRootVolumeTasks(*ec2.DescribeReplaceRootVolumeTasksInput) (*ec2.DescribeReplaceRootVolumeTasksOutput, error) + DescribeReplaceRootVolumeTasksWithContext(aws.Context, *ec2.DescribeReplaceRootVolumeTasksInput, ...request.Option) (*ec2.DescribeReplaceRootVolumeTasksOutput, error) + DescribeReplaceRootVolumeTasksRequest(*ec2.DescribeReplaceRootVolumeTasksInput) (*request.Request, *ec2.DescribeReplaceRootVolumeTasksOutput) + + DescribeReplaceRootVolumeTasksPages(*ec2.DescribeReplaceRootVolumeTasksInput, func(*ec2.DescribeReplaceRootVolumeTasksOutput, bool) bool) error + DescribeReplaceRootVolumeTasksPagesWithContext(aws.Context, *ec2.DescribeReplaceRootVolumeTasksInput, func(*ec2.DescribeReplaceRootVolumeTasksOutput, bool) bool, ...request.Option) error + + DescribeReservedInstances(*ec2.DescribeReservedInstancesInput) (*ec2.DescribeReservedInstancesOutput, error) + DescribeReservedInstancesWithContext(aws.Context, *ec2.DescribeReservedInstancesInput, ...request.Option) (*ec2.DescribeReservedInstancesOutput, error) + DescribeReservedInstancesRequest(*ec2.DescribeReservedInstancesInput) (*request.Request, *ec2.DescribeReservedInstancesOutput) + + DescribeReservedInstancesListings(*ec2.DescribeReservedInstancesListingsInput) (*ec2.DescribeReservedInstancesListingsOutput, error) + DescribeReservedInstancesListingsWithContext(aws.Context, *ec2.DescribeReservedInstancesListingsInput, ...request.Option) (*ec2.DescribeReservedInstancesListingsOutput, error) + DescribeReservedInstancesListingsRequest(*ec2.DescribeReservedInstancesListingsInput) (*request.Request, *ec2.DescribeReservedInstancesListingsOutput) + + DescribeReservedInstancesModifications(*ec2.DescribeReservedInstancesModificationsInput) (*ec2.DescribeReservedInstancesModificationsOutput, error) + DescribeReservedInstancesModificationsWithContext(aws.Context, *ec2.DescribeReservedInstancesModificationsInput, ...request.Option) (*ec2.DescribeReservedInstancesModificationsOutput, error) + DescribeReservedInstancesModificationsRequest(*ec2.DescribeReservedInstancesModificationsInput) (*request.Request, *ec2.DescribeReservedInstancesModificationsOutput) + + DescribeReservedInstancesModificationsPages(*ec2.DescribeReservedInstancesModificationsInput, func(*ec2.DescribeReservedInstancesModificationsOutput, bool) bool) error + DescribeReservedInstancesModificationsPagesWithContext(aws.Context, *ec2.DescribeReservedInstancesModificationsInput, func(*ec2.DescribeReservedInstancesModificationsOutput, bool) bool, ...request.Option) error + + DescribeReservedInstancesOfferings(*ec2.DescribeReservedInstancesOfferingsInput) (*ec2.DescribeReservedInstancesOfferingsOutput, error) + DescribeReservedInstancesOfferingsWithContext(aws.Context, *ec2.DescribeReservedInstancesOfferingsInput, ...request.Option) (*ec2.DescribeReservedInstancesOfferingsOutput, error) + DescribeReservedInstancesOfferingsRequest(*ec2.DescribeReservedInstancesOfferingsInput) (*request.Request, *ec2.DescribeReservedInstancesOfferingsOutput) + + DescribeReservedInstancesOfferingsPages(*ec2.DescribeReservedInstancesOfferingsInput, func(*ec2.DescribeReservedInstancesOfferingsOutput, bool) bool) error + DescribeReservedInstancesOfferingsPagesWithContext(aws.Context, *ec2.DescribeReservedInstancesOfferingsInput, func(*ec2.DescribeReservedInstancesOfferingsOutput, bool) bool, ...request.Option) error + + DescribeRouteTables(*ec2.DescribeRouteTablesInput) (*ec2.DescribeRouteTablesOutput, error) + DescribeRouteTablesWithContext(aws.Context, 
*ec2.DescribeRouteTablesInput, ...request.Option) (*ec2.DescribeRouteTablesOutput, error) + DescribeRouteTablesRequest(*ec2.DescribeRouteTablesInput) (*request.Request, *ec2.DescribeRouteTablesOutput) + + DescribeRouteTablesPages(*ec2.DescribeRouteTablesInput, func(*ec2.DescribeRouteTablesOutput, bool) bool) error + DescribeRouteTablesPagesWithContext(aws.Context, *ec2.DescribeRouteTablesInput, func(*ec2.DescribeRouteTablesOutput, bool) bool, ...request.Option) error + + DescribeScheduledInstanceAvailability(*ec2.DescribeScheduledInstanceAvailabilityInput) (*ec2.DescribeScheduledInstanceAvailabilityOutput, error) + DescribeScheduledInstanceAvailabilityWithContext(aws.Context, *ec2.DescribeScheduledInstanceAvailabilityInput, ...request.Option) (*ec2.DescribeScheduledInstanceAvailabilityOutput, error) + DescribeScheduledInstanceAvailabilityRequest(*ec2.DescribeScheduledInstanceAvailabilityInput) (*request.Request, *ec2.DescribeScheduledInstanceAvailabilityOutput) + + DescribeScheduledInstanceAvailabilityPages(*ec2.DescribeScheduledInstanceAvailabilityInput, func(*ec2.DescribeScheduledInstanceAvailabilityOutput, bool) bool) error + DescribeScheduledInstanceAvailabilityPagesWithContext(aws.Context, *ec2.DescribeScheduledInstanceAvailabilityInput, func(*ec2.DescribeScheduledInstanceAvailabilityOutput, bool) bool, ...request.Option) error + + DescribeScheduledInstances(*ec2.DescribeScheduledInstancesInput) (*ec2.DescribeScheduledInstancesOutput, error) + DescribeScheduledInstancesWithContext(aws.Context, *ec2.DescribeScheduledInstancesInput, ...request.Option) (*ec2.DescribeScheduledInstancesOutput, error) + DescribeScheduledInstancesRequest(*ec2.DescribeScheduledInstancesInput) (*request.Request, *ec2.DescribeScheduledInstancesOutput) + + DescribeScheduledInstancesPages(*ec2.DescribeScheduledInstancesInput, func(*ec2.DescribeScheduledInstancesOutput, bool) bool) error + DescribeScheduledInstancesPagesWithContext(aws.Context, *ec2.DescribeScheduledInstancesInput, func(*ec2.DescribeScheduledInstancesOutput, bool) bool, ...request.Option) error + + DescribeSecurityGroupReferences(*ec2.DescribeSecurityGroupReferencesInput) (*ec2.DescribeSecurityGroupReferencesOutput, error) + DescribeSecurityGroupReferencesWithContext(aws.Context, *ec2.DescribeSecurityGroupReferencesInput, ...request.Option) (*ec2.DescribeSecurityGroupReferencesOutput, error) + DescribeSecurityGroupReferencesRequest(*ec2.DescribeSecurityGroupReferencesInput) (*request.Request, *ec2.DescribeSecurityGroupReferencesOutput) + + DescribeSecurityGroupRules(*ec2.DescribeSecurityGroupRulesInput) (*ec2.DescribeSecurityGroupRulesOutput, error) + DescribeSecurityGroupRulesWithContext(aws.Context, *ec2.DescribeSecurityGroupRulesInput, ...request.Option) (*ec2.DescribeSecurityGroupRulesOutput, error) + DescribeSecurityGroupRulesRequest(*ec2.DescribeSecurityGroupRulesInput) (*request.Request, *ec2.DescribeSecurityGroupRulesOutput) + + DescribeSecurityGroupRulesPages(*ec2.DescribeSecurityGroupRulesInput, func(*ec2.DescribeSecurityGroupRulesOutput, bool) bool) error + DescribeSecurityGroupRulesPagesWithContext(aws.Context, *ec2.DescribeSecurityGroupRulesInput, func(*ec2.DescribeSecurityGroupRulesOutput, bool) bool, ...request.Option) error + + DescribeSecurityGroups(*ec2.DescribeSecurityGroupsInput) (*ec2.DescribeSecurityGroupsOutput, error) + DescribeSecurityGroupsWithContext(aws.Context, *ec2.DescribeSecurityGroupsInput, ...request.Option) (*ec2.DescribeSecurityGroupsOutput, error) + 
DescribeSecurityGroupsRequest(*ec2.DescribeSecurityGroupsInput) (*request.Request, *ec2.DescribeSecurityGroupsOutput) + + DescribeSecurityGroupsPages(*ec2.DescribeSecurityGroupsInput, func(*ec2.DescribeSecurityGroupsOutput, bool) bool) error + DescribeSecurityGroupsPagesWithContext(aws.Context, *ec2.DescribeSecurityGroupsInput, func(*ec2.DescribeSecurityGroupsOutput, bool) bool, ...request.Option) error + + DescribeSnapshotAttribute(*ec2.DescribeSnapshotAttributeInput) (*ec2.DescribeSnapshotAttributeOutput, error) + DescribeSnapshotAttributeWithContext(aws.Context, *ec2.DescribeSnapshotAttributeInput, ...request.Option) (*ec2.DescribeSnapshotAttributeOutput, error) + DescribeSnapshotAttributeRequest(*ec2.DescribeSnapshotAttributeInput) (*request.Request, *ec2.DescribeSnapshotAttributeOutput) + + DescribeSnapshotTierStatus(*ec2.DescribeSnapshotTierStatusInput) (*ec2.DescribeSnapshotTierStatusOutput, error) + DescribeSnapshotTierStatusWithContext(aws.Context, *ec2.DescribeSnapshotTierStatusInput, ...request.Option) (*ec2.DescribeSnapshotTierStatusOutput, error) + DescribeSnapshotTierStatusRequest(*ec2.DescribeSnapshotTierStatusInput) (*request.Request, *ec2.DescribeSnapshotTierStatusOutput) + + DescribeSnapshotTierStatusPages(*ec2.DescribeSnapshotTierStatusInput, func(*ec2.DescribeSnapshotTierStatusOutput, bool) bool) error + DescribeSnapshotTierStatusPagesWithContext(aws.Context, *ec2.DescribeSnapshotTierStatusInput, func(*ec2.DescribeSnapshotTierStatusOutput, bool) bool, ...request.Option) error + + DescribeSnapshots(*ec2.DescribeSnapshotsInput) (*ec2.DescribeSnapshotsOutput, error) + DescribeSnapshotsWithContext(aws.Context, *ec2.DescribeSnapshotsInput, ...request.Option) (*ec2.DescribeSnapshotsOutput, error) + DescribeSnapshotsRequest(*ec2.DescribeSnapshotsInput) (*request.Request, *ec2.DescribeSnapshotsOutput) + + DescribeSnapshotsPages(*ec2.DescribeSnapshotsInput, func(*ec2.DescribeSnapshotsOutput, bool) bool) error + DescribeSnapshotsPagesWithContext(aws.Context, *ec2.DescribeSnapshotsInput, func(*ec2.DescribeSnapshotsOutput, bool) bool, ...request.Option) error + + DescribeSpotDatafeedSubscription(*ec2.DescribeSpotDatafeedSubscriptionInput) (*ec2.DescribeSpotDatafeedSubscriptionOutput, error) + DescribeSpotDatafeedSubscriptionWithContext(aws.Context, *ec2.DescribeSpotDatafeedSubscriptionInput, ...request.Option) (*ec2.DescribeSpotDatafeedSubscriptionOutput, error) + DescribeSpotDatafeedSubscriptionRequest(*ec2.DescribeSpotDatafeedSubscriptionInput) (*request.Request, *ec2.DescribeSpotDatafeedSubscriptionOutput) + + DescribeSpotFleetInstances(*ec2.DescribeSpotFleetInstancesInput) (*ec2.DescribeSpotFleetInstancesOutput, error) + DescribeSpotFleetInstancesWithContext(aws.Context, *ec2.DescribeSpotFleetInstancesInput, ...request.Option) (*ec2.DescribeSpotFleetInstancesOutput, error) + DescribeSpotFleetInstancesRequest(*ec2.DescribeSpotFleetInstancesInput) (*request.Request, *ec2.DescribeSpotFleetInstancesOutput) + + DescribeSpotFleetRequestHistory(*ec2.DescribeSpotFleetRequestHistoryInput) (*ec2.DescribeSpotFleetRequestHistoryOutput, error) + DescribeSpotFleetRequestHistoryWithContext(aws.Context, *ec2.DescribeSpotFleetRequestHistoryInput, ...request.Option) (*ec2.DescribeSpotFleetRequestHistoryOutput, error) + DescribeSpotFleetRequestHistoryRequest(*ec2.DescribeSpotFleetRequestHistoryInput) (*request.Request, *ec2.DescribeSpotFleetRequestHistoryOutput) + + DescribeSpotFleetRequests(*ec2.DescribeSpotFleetRequestsInput) (*ec2.DescribeSpotFleetRequestsOutput, error) + 
DescribeSpotFleetRequestsWithContext(aws.Context, *ec2.DescribeSpotFleetRequestsInput, ...request.Option) (*ec2.DescribeSpotFleetRequestsOutput, error) + DescribeSpotFleetRequestsRequest(*ec2.DescribeSpotFleetRequestsInput) (*request.Request, *ec2.DescribeSpotFleetRequestsOutput) + + DescribeSpotFleetRequestsPages(*ec2.DescribeSpotFleetRequestsInput, func(*ec2.DescribeSpotFleetRequestsOutput, bool) bool) error + DescribeSpotFleetRequestsPagesWithContext(aws.Context, *ec2.DescribeSpotFleetRequestsInput, func(*ec2.DescribeSpotFleetRequestsOutput, bool) bool, ...request.Option) error + + DescribeSpotInstanceRequests(*ec2.DescribeSpotInstanceRequestsInput) (*ec2.DescribeSpotInstanceRequestsOutput, error) + DescribeSpotInstanceRequestsWithContext(aws.Context, *ec2.DescribeSpotInstanceRequestsInput, ...request.Option) (*ec2.DescribeSpotInstanceRequestsOutput, error) + DescribeSpotInstanceRequestsRequest(*ec2.DescribeSpotInstanceRequestsInput) (*request.Request, *ec2.DescribeSpotInstanceRequestsOutput) + + DescribeSpotInstanceRequestsPages(*ec2.DescribeSpotInstanceRequestsInput, func(*ec2.DescribeSpotInstanceRequestsOutput, bool) bool) error + DescribeSpotInstanceRequestsPagesWithContext(aws.Context, *ec2.DescribeSpotInstanceRequestsInput, func(*ec2.DescribeSpotInstanceRequestsOutput, bool) bool, ...request.Option) error + + DescribeSpotPriceHistory(*ec2.DescribeSpotPriceHistoryInput) (*ec2.DescribeSpotPriceHistoryOutput, error) + DescribeSpotPriceHistoryWithContext(aws.Context, *ec2.DescribeSpotPriceHistoryInput, ...request.Option) (*ec2.DescribeSpotPriceHistoryOutput, error) + DescribeSpotPriceHistoryRequest(*ec2.DescribeSpotPriceHistoryInput) (*request.Request, *ec2.DescribeSpotPriceHistoryOutput) + + DescribeSpotPriceHistoryPages(*ec2.DescribeSpotPriceHistoryInput, func(*ec2.DescribeSpotPriceHistoryOutput, bool) bool) error + DescribeSpotPriceHistoryPagesWithContext(aws.Context, *ec2.DescribeSpotPriceHistoryInput, func(*ec2.DescribeSpotPriceHistoryOutput, bool) bool, ...request.Option) error + + DescribeStaleSecurityGroups(*ec2.DescribeStaleSecurityGroupsInput) (*ec2.DescribeStaleSecurityGroupsOutput, error) + DescribeStaleSecurityGroupsWithContext(aws.Context, *ec2.DescribeStaleSecurityGroupsInput, ...request.Option) (*ec2.DescribeStaleSecurityGroupsOutput, error) + DescribeStaleSecurityGroupsRequest(*ec2.DescribeStaleSecurityGroupsInput) (*request.Request, *ec2.DescribeStaleSecurityGroupsOutput) + + DescribeStaleSecurityGroupsPages(*ec2.DescribeStaleSecurityGroupsInput, func(*ec2.DescribeStaleSecurityGroupsOutput, bool) bool) error + DescribeStaleSecurityGroupsPagesWithContext(aws.Context, *ec2.DescribeStaleSecurityGroupsInput, func(*ec2.DescribeStaleSecurityGroupsOutput, bool) bool, ...request.Option) error + + DescribeStoreImageTasks(*ec2.DescribeStoreImageTasksInput) (*ec2.DescribeStoreImageTasksOutput, error) + DescribeStoreImageTasksWithContext(aws.Context, *ec2.DescribeStoreImageTasksInput, ...request.Option) (*ec2.DescribeStoreImageTasksOutput, error) + DescribeStoreImageTasksRequest(*ec2.DescribeStoreImageTasksInput) (*request.Request, *ec2.DescribeStoreImageTasksOutput) + + DescribeStoreImageTasksPages(*ec2.DescribeStoreImageTasksInput, func(*ec2.DescribeStoreImageTasksOutput, bool) bool) error + DescribeStoreImageTasksPagesWithContext(aws.Context, *ec2.DescribeStoreImageTasksInput, func(*ec2.DescribeStoreImageTasksOutput, bool) bool, ...request.Option) error + + DescribeSubnets(*ec2.DescribeSubnetsInput) (*ec2.DescribeSubnetsOutput, error) + DescribeSubnetsWithContext(aws.Context, 
*ec2.DescribeSubnetsInput, ...request.Option) (*ec2.DescribeSubnetsOutput, error) + DescribeSubnetsRequest(*ec2.DescribeSubnetsInput) (*request.Request, *ec2.DescribeSubnetsOutput) + + DescribeSubnetsPages(*ec2.DescribeSubnetsInput, func(*ec2.DescribeSubnetsOutput, bool) bool) error + DescribeSubnetsPagesWithContext(aws.Context, *ec2.DescribeSubnetsInput, func(*ec2.DescribeSubnetsOutput, bool) bool, ...request.Option) error + + DescribeTags(*ec2.DescribeTagsInput) (*ec2.DescribeTagsOutput, error) + DescribeTagsWithContext(aws.Context, *ec2.DescribeTagsInput, ...request.Option) (*ec2.DescribeTagsOutput, error) + DescribeTagsRequest(*ec2.DescribeTagsInput) (*request.Request, *ec2.DescribeTagsOutput) + + DescribeTagsPages(*ec2.DescribeTagsInput, func(*ec2.DescribeTagsOutput, bool) bool) error + DescribeTagsPagesWithContext(aws.Context, *ec2.DescribeTagsInput, func(*ec2.DescribeTagsOutput, bool) bool, ...request.Option) error + + DescribeTrafficMirrorFilterRules(*ec2.DescribeTrafficMirrorFilterRulesInput) (*ec2.DescribeTrafficMirrorFilterRulesOutput, error) + DescribeTrafficMirrorFilterRulesWithContext(aws.Context, *ec2.DescribeTrafficMirrorFilterRulesInput, ...request.Option) (*ec2.DescribeTrafficMirrorFilterRulesOutput, error) + DescribeTrafficMirrorFilterRulesRequest(*ec2.DescribeTrafficMirrorFilterRulesInput) (*request.Request, *ec2.DescribeTrafficMirrorFilterRulesOutput) + + DescribeTrafficMirrorFilters(*ec2.DescribeTrafficMirrorFiltersInput) (*ec2.DescribeTrafficMirrorFiltersOutput, error) + DescribeTrafficMirrorFiltersWithContext(aws.Context, *ec2.DescribeTrafficMirrorFiltersInput, ...request.Option) (*ec2.DescribeTrafficMirrorFiltersOutput, error) + DescribeTrafficMirrorFiltersRequest(*ec2.DescribeTrafficMirrorFiltersInput) (*request.Request, *ec2.DescribeTrafficMirrorFiltersOutput) + + DescribeTrafficMirrorFiltersPages(*ec2.DescribeTrafficMirrorFiltersInput, func(*ec2.DescribeTrafficMirrorFiltersOutput, bool) bool) error + DescribeTrafficMirrorFiltersPagesWithContext(aws.Context, *ec2.DescribeTrafficMirrorFiltersInput, func(*ec2.DescribeTrafficMirrorFiltersOutput, bool) bool, ...request.Option) error + + DescribeTrafficMirrorSessions(*ec2.DescribeTrafficMirrorSessionsInput) (*ec2.DescribeTrafficMirrorSessionsOutput, error) + DescribeTrafficMirrorSessionsWithContext(aws.Context, *ec2.DescribeTrafficMirrorSessionsInput, ...request.Option) (*ec2.DescribeTrafficMirrorSessionsOutput, error) + DescribeTrafficMirrorSessionsRequest(*ec2.DescribeTrafficMirrorSessionsInput) (*request.Request, *ec2.DescribeTrafficMirrorSessionsOutput) + + DescribeTrafficMirrorSessionsPages(*ec2.DescribeTrafficMirrorSessionsInput, func(*ec2.DescribeTrafficMirrorSessionsOutput, bool) bool) error + DescribeTrafficMirrorSessionsPagesWithContext(aws.Context, *ec2.DescribeTrafficMirrorSessionsInput, func(*ec2.DescribeTrafficMirrorSessionsOutput, bool) bool, ...request.Option) error + + DescribeTrafficMirrorTargets(*ec2.DescribeTrafficMirrorTargetsInput) (*ec2.DescribeTrafficMirrorTargetsOutput, error) + DescribeTrafficMirrorTargetsWithContext(aws.Context, *ec2.DescribeTrafficMirrorTargetsInput, ...request.Option) (*ec2.DescribeTrafficMirrorTargetsOutput, error) + DescribeTrafficMirrorTargetsRequest(*ec2.DescribeTrafficMirrorTargetsInput) (*request.Request, *ec2.DescribeTrafficMirrorTargetsOutput) + + DescribeTrafficMirrorTargetsPages(*ec2.DescribeTrafficMirrorTargetsInput, func(*ec2.DescribeTrafficMirrorTargetsOutput, bool) bool) error + DescribeTrafficMirrorTargetsPagesWithContext(aws.Context, 
*ec2.DescribeTrafficMirrorTargetsInput, func(*ec2.DescribeTrafficMirrorTargetsOutput, bool) bool, ...request.Option) error + + DescribeTransitGatewayAttachments(*ec2.DescribeTransitGatewayAttachmentsInput) (*ec2.DescribeTransitGatewayAttachmentsOutput, error) + DescribeTransitGatewayAttachmentsWithContext(aws.Context, *ec2.DescribeTransitGatewayAttachmentsInput, ...request.Option) (*ec2.DescribeTransitGatewayAttachmentsOutput, error) + DescribeTransitGatewayAttachmentsRequest(*ec2.DescribeTransitGatewayAttachmentsInput) (*request.Request, *ec2.DescribeTransitGatewayAttachmentsOutput) + + DescribeTransitGatewayAttachmentsPages(*ec2.DescribeTransitGatewayAttachmentsInput, func(*ec2.DescribeTransitGatewayAttachmentsOutput, bool) bool) error + DescribeTransitGatewayAttachmentsPagesWithContext(aws.Context, *ec2.DescribeTransitGatewayAttachmentsInput, func(*ec2.DescribeTransitGatewayAttachmentsOutput, bool) bool, ...request.Option) error + + DescribeTransitGatewayConnectPeers(*ec2.DescribeTransitGatewayConnectPeersInput) (*ec2.DescribeTransitGatewayConnectPeersOutput, error) + DescribeTransitGatewayConnectPeersWithContext(aws.Context, *ec2.DescribeTransitGatewayConnectPeersInput, ...request.Option) (*ec2.DescribeTransitGatewayConnectPeersOutput, error) + DescribeTransitGatewayConnectPeersRequest(*ec2.DescribeTransitGatewayConnectPeersInput) (*request.Request, *ec2.DescribeTransitGatewayConnectPeersOutput) + + DescribeTransitGatewayConnectPeersPages(*ec2.DescribeTransitGatewayConnectPeersInput, func(*ec2.DescribeTransitGatewayConnectPeersOutput, bool) bool) error + DescribeTransitGatewayConnectPeersPagesWithContext(aws.Context, *ec2.DescribeTransitGatewayConnectPeersInput, func(*ec2.DescribeTransitGatewayConnectPeersOutput, bool) bool, ...request.Option) error + + DescribeTransitGatewayConnects(*ec2.DescribeTransitGatewayConnectsInput) (*ec2.DescribeTransitGatewayConnectsOutput, error) + DescribeTransitGatewayConnectsWithContext(aws.Context, *ec2.DescribeTransitGatewayConnectsInput, ...request.Option) (*ec2.DescribeTransitGatewayConnectsOutput, error) + DescribeTransitGatewayConnectsRequest(*ec2.DescribeTransitGatewayConnectsInput) (*request.Request, *ec2.DescribeTransitGatewayConnectsOutput) + + DescribeTransitGatewayConnectsPages(*ec2.DescribeTransitGatewayConnectsInput, func(*ec2.DescribeTransitGatewayConnectsOutput, bool) bool) error + DescribeTransitGatewayConnectsPagesWithContext(aws.Context, *ec2.DescribeTransitGatewayConnectsInput, func(*ec2.DescribeTransitGatewayConnectsOutput, bool) bool, ...request.Option) error + + DescribeTransitGatewayMulticastDomains(*ec2.DescribeTransitGatewayMulticastDomainsInput) (*ec2.DescribeTransitGatewayMulticastDomainsOutput, error) + DescribeTransitGatewayMulticastDomainsWithContext(aws.Context, *ec2.DescribeTransitGatewayMulticastDomainsInput, ...request.Option) (*ec2.DescribeTransitGatewayMulticastDomainsOutput, error) + DescribeTransitGatewayMulticastDomainsRequest(*ec2.DescribeTransitGatewayMulticastDomainsInput) (*request.Request, *ec2.DescribeTransitGatewayMulticastDomainsOutput) + + DescribeTransitGatewayMulticastDomainsPages(*ec2.DescribeTransitGatewayMulticastDomainsInput, func(*ec2.DescribeTransitGatewayMulticastDomainsOutput, bool) bool) error + DescribeTransitGatewayMulticastDomainsPagesWithContext(aws.Context, *ec2.DescribeTransitGatewayMulticastDomainsInput, func(*ec2.DescribeTransitGatewayMulticastDomainsOutput, bool) bool, ...request.Option) error + + 
DescribeTransitGatewayPeeringAttachments(*ec2.DescribeTransitGatewayPeeringAttachmentsInput) (*ec2.DescribeTransitGatewayPeeringAttachmentsOutput, error) + DescribeTransitGatewayPeeringAttachmentsWithContext(aws.Context, *ec2.DescribeTransitGatewayPeeringAttachmentsInput, ...request.Option) (*ec2.DescribeTransitGatewayPeeringAttachmentsOutput, error) + DescribeTransitGatewayPeeringAttachmentsRequest(*ec2.DescribeTransitGatewayPeeringAttachmentsInput) (*request.Request, *ec2.DescribeTransitGatewayPeeringAttachmentsOutput) + + DescribeTransitGatewayPeeringAttachmentsPages(*ec2.DescribeTransitGatewayPeeringAttachmentsInput, func(*ec2.DescribeTransitGatewayPeeringAttachmentsOutput, bool) bool) error + DescribeTransitGatewayPeeringAttachmentsPagesWithContext(aws.Context, *ec2.DescribeTransitGatewayPeeringAttachmentsInput, func(*ec2.DescribeTransitGatewayPeeringAttachmentsOutput, bool) bool, ...request.Option) error + + DescribeTransitGatewayPolicyTables(*ec2.DescribeTransitGatewayPolicyTablesInput) (*ec2.DescribeTransitGatewayPolicyTablesOutput, error) + DescribeTransitGatewayPolicyTablesWithContext(aws.Context, *ec2.DescribeTransitGatewayPolicyTablesInput, ...request.Option) (*ec2.DescribeTransitGatewayPolicyTablesOutput, error) + DescribeTransitGatewayPolicyTablesRequest(*ec2.DescribeTransitGatewayPolicyTablesInput) (*request.Request, *ec2.DescribeTransitGatewayPolicyTablesOutput) + + DescribeTransitGatewayPolicyTablesPages(*ec2.DescribeTransitGatewayPolicyTablesInput, func(*ec2.DescribeTransitGatewayPolicyTablesOutput, bool) bool) error + DescribeTransitGatewayPolicyTablesPagesWithContext(aws.Context, *ec2.DescribeTransitGatewayPolicyTablesInput, func(*ec2.DescribeTransitGatewayPolicyTablesOutput, bool) bool, ...request.Option) error + + DescribeTransitGatewayRouteTableAnnouncements(*ec2.DescribeTransitGatewayRouteTableAnnouncementsInput) (*ec2.DescribeTransitGatewayRouteTableAnnouncementsOutput, error) + DescribeTransitGatewayRouteTableAnnouncementsWithContext(aws.Context, *ec2.DescribeTransitGatewayRouteTableAnnouncementsInput, ...request.Option) (*ec2.DescribeTransitGatewayRouteTableAnnouncementsOutput, error) + DescribeTransitGatewayRouteTableAnnouncementsRequest(*ec2.DescribeTransitGatewayRouteTableAnnouncementsInput) (*request.Request, *ec2.DescribeTransitGatewayRouteTableAnnouncementsOutput) + + DescribeTransitGatewayRouteTableAnnouncementsPages(*ec2.DescribeTransitGatewayRouteTableAnnouncementsInput, func(*ec2.DescribeTransitGatewayRouteTableAnnouncementsOutput, bool) bool) error + DescribeTransitGatewayRouteTableAnnouncementsPagesWithContext(aws.Context, *ec2.DescribeTransitGatewayRouteTableAnnouncementsInput, func(*ec2.DescribeTransitGatewayRouteTableAnnouncementsOutput, bool) bool, ...request.Option) error + + DescribeTransitGatewayRouteTables(*ec2.DescribeTransitGatewayRouteTablesInput) (*ec2.DescribeTransitGatewayRouteTablesOutput, error) + DescribeTransitGatewayRouteTablesWithContext(aws.Context, *ec2.DescribeTransitGatewayRouteTablesInput, ...request.Option) (*ec2.DescribeTransitGatewayRouteTablesOutput, error) + DescribeTransitGatewayRouteTablesRequest(*ec2.DescribeTransitGatewayRouteTablesInput) (*request.Request, *ec2.DescribeTransitGatewayRouteTablesOutput) + + DescribeTransitGatewayRouteTablesPages(*ec2.DescribeTransitGatewayRouteTablesInput, func(*ec2.DescribeTransitGatewayRouteTablesOutput, bool) bool) error + DescribeTransitGatewayRouteTablesPagesWithContext(aws.Context, *ec2.DescribeTransitGatewayRouteTablesInput, func(*ec2.DescribeTransitGatewayRouteTablesOutput, 
bool) bool, ...request.Option) error + + DescribeTransitGatewayVpcAttachments(*ec2.DescribeTransitGatewayVpcAttachmentsInput) (*ec2.DescribeTransitGatewayVpcAttachmentsOutput, error) + DescribeTransitGatewayVpcAttachmentsWithContext(aws.Context, *ec2.DescribeTransitGatewayVpcAttachmentsInput, ...request.Option) (*ec2.DescribeTransitGatewayVpcAttachmentsOutput, error) + DescribeTransitGatewayVpcAttachmentsRequest(*ec2.DescribeTransitGatewayVpcAttachmentsInput) (*request.Request, *ec2.DescribeTransitGatewayVpcAttachmentsOutput) + + DescribeTransitGatewayVpcAttachmentsPages(*ec2.DescribeTransitGatewayVpcAttachmentsInput, func(*ec2.DescribeTransitGatewayVpcAttachmentsOutput, bool) bool) error + DescribeTransitGatewayVpcAttachmentsPagesWithContext(aws.Context, *ec2.DescribeTransitGatewayVpcAttachmentsInput, func(*ec2.DescribeTransitGatewayVpcAttachmentsOutput, bool) bool, ...request.Option) error + + DescribeTransitGateways(*ec2.DescribeTransitGatewaysInput) (*ec2.DescribeTransitGatewaysOutput, error) + DescribeTransitGatewaysWithContext(aws.Context, *ec2.DescribeTransitGatewaysInput, ...request.Option) (*ec2.DescribeTransitGatewaysOutput, error) + DescribeTransitGatewaysRequest(*ec2.DescribeTransitGatewaysInput) (*request.Request, *ec2.DescribeTransitGatewaysOutput) + + DescribeTransitGatewaysPages(*ec2.DescribeTransitGatewaysInput, func(*ec2.DescribeTransitGatewaysOutput, bool) bool) error + DescribeTransitGatewaysPagesWithContext(aws.Context, *ec2.DescribeTransitGatewaysInput, func(*ec2.DescribeTransitGatewaysOutput, bool) bool, ...request.Option) error + + DescribeTrunkInterfaceAssociations(*ec2.DescribeTrunkInterfaceAssociationsInput) (*ec2.DescribeTrunkInterfaceAssociationsOutput, error) + DescribeTrunkInterfaceAssociationsWithContext(aws.Context, *ec2.DescribeTrunkInterfaceAssociationsInput, ...request.Option) (*ec2.DescribeTrunkInterfaceAssociationsOutput, error) + DescribeTrunkInterfaceAssociationsRequest(*ec2.DescribeTrunkInterfaceAssociationsInput) (*request.Request, *ec2.DescribeTrunkInterfaceAssociationsOutput) + + DescribeTrunkInterfaceAssociationsPages(*ec2.DescribeTrunkInterfaceAssociationsInput, func(*ec2.DescribeTrunkInterfaceAssociationsOutput, bool) bool) error + DescribeTrunkInterfaceAssociationsPagesWithContext(aws.Context, *ec2.DescribeTrunkInterfaceAssociationsInput, func(*ec2.DescribeTrunkInterfaceAssociationsOutput, bool) bool, ...request.Option) error + + DescribeVerifiedAccessEndpoints(*ec2.DescribeVerifiedAccessEndpointsInput) (*ec2.DescribeVerifiedAccessEndpointsOutput, error) + DescribeVerifiedAccessEndpointsWithContext(aws.Context, *ec2.DescribeVerifiedAccessEndpointsInput, ...request.Option) (*ec2.DescribeVerifiedAccessEndpointsOutput, error) + DescribeVerifiedAccessEndpointsRequest(*ec2.DescribeVerifiedAccessEndpointsInput) (*request.Request, *ec2.DescribeVerifiedAccessEndpointsOutput) + + DescribeVerifiedAccessEndpointsPages(*ec2.DescribeVerifiedAccessEndpointsInput, func(*ec2.DescribeVerifiedAccessEndpointsOutput, bool) bool) error + DescribeVerifiedAccessEndpointsPagesWithContext(aws.Context, *ec2.DescribeVerifiedAccessEndpointsInput, func(*ec2.DescribeVerifiedAccessEndpointsOutput, bool) bool, ...request.Option) error + + DescribeVerifiedAccessGroups(*ec2.DescribeVerifiedAccessGroupsInput) (*ec2.DescribeVerifiedAccessGroupsOutput, error) + DescribeVerifiedAccessGroupsWithContext(aws.Context, *ec2.DescribeVerifiedAccessGroupsInput, ...request.Option) (*ec2.DescribeVerifiedAccessGroupsOutput, error) + 
DescribeVerifiedAccessGroupsRequest(*ec2.DescribeVerifiedAccessGroupsInput) (*request.Request, *ec2.DescribeVerifiedAccessGroupsOutput) + + DescribeVerifiedAccessGroupsPages(*ec2.DescribeVerifiedAccessGroupsInput, func(*ec2.DescribeVerifiedAccessGroupsOutput, bool) bool) error + DescribeVerifiedAccessGroupsPagesWithContext(aws.Context, *ec2.DescribeVerifiedAccessGroupsInput, func(*ec2.DescribeVerifiedAccessGroupsOutput, bool) bool, ...request.Option) error + + DescribeVerifiedAccessInstanceLoggingConfigurations(*ec2.DescribeVerifiedAccessInstanceLoggingConfigurationsInput) (*ec2.DescribeVerifiedAccessInstanceLoggingConfigurationsOutput, error) + DescribeVerifiedAccessInstanceLoggingConfigurationsWithContext(aws.Context, *ec2.DescribeVerifiedAccessInstanceLoggingConfigurationsInput, ...request.Option) (*ec2.DescribeVerifiedAccessInstanceLoggingConfigurationsOutput, error) + DescribeVerifiedAccessInstanceLoggingConfigurationsRequest(*ec2.DescribeVerifiedAccessInstanceLoggingConfigurationsInput) (*request.Request, *ec2.DescribeVerifiedAccessInstanceLoggingConfigurationsOutput) + + DescribeVerifiedAccessInstanceLoggingConfigurationsPages(*ec2.DescribeVerifiedAccessInstanceLoggingConfigurationsInput, func(*ec2.DescribeVerifiedAccessInstanceLoggingConfigurationsOutput, bool) bool) error + DescribeVerifiedAccessInstanceLoggingConfigurationsPagesWithContext(aws.Context, *ec2.DescribeVerifiedAccessInstanceLoggingConfigurationsInput, func(*ec2.DescribeVerifiedAccessInstanceLoggingConfigurationsOutput, bool) bool, ...request.Option) error + + DescribeVerifiedAccessInstances(*ec2.DescribeVerifiedAccessInstancesInput) (*ec2.DescribeVerifiedAccessInstancesOutput, error) + DescribeVerifiedAccessInstancesWithContext(aws.Context, *ec2.DescribeVerifiedAccessInstancesInput, ...request.Option) (*ec2.DescribeVerifiedAccessInstancesOutput, error) + DescribeVerifiedAccessInstancesRequest(*ec2.DescribeVerifiedAccessInstancesInput) (*request.Request, *ec2.DescribeVerifiedAccessInstancesOutput) + + DescribeVerifiedAccessInstancesPages(*ec2.DescribeVerifiedAccessInstancesInput, func(*ec2.DescribeVerifiedAccessInstancesOutput, bool) bool) error + DescribeVerifiedAccessInstancesPagesWithContext(aws.Context, *ec2.DescribeVerifiedAccessInstancesInput, func(*ec2.DescribeVerifiedAccessInstancesOutput, bool) bool, ...request.Option) error + + DescribeVerifiedAccessTrustProviders(*ec2.DescribeVerifiedAccessTrustProvidersInput) (*ec2.DescribeVerifiedAccessTrustProvidersOutput, error) + DescribeVerifiedAccessTrustProvidersWithContext(aws.Context, *ec2.DescribeVerifiedAccessTrustProvidersInput, ...request.Option) (*ec2.DescribeVerifiedAccessTrustProvidersOutput, error) + DescribeVerifiedAccessTrustProvidersRequest(*ec2.DescribeVerifiedAccessTrustProvidersInput) (*request.Request, *ec2.DescribeVerifiedAccessTrustProvidersOutput) + + DescribeVerifiedAccessTrustProvidersPages(*ec2.DescribeVerifiedAccessTrustProvidersInput, func(*ec2.DescribeVerifiedAccessTrustProvidersOutput, bool) bool) error + DescribeVerifiedAccessTrustProvidersPagesWithContext(aws.Context, *ec2.DescribeVerifiedAccessTrustProvidersInput, func(*ec2.DescribeVerifiedAccessTrustProvidersOutput, bool) bool, ...request.Option) error + + DescribeVolumeAttribute(*ec2.DescribeVolumeAttributeInput) (*ec2.DescribeVolumeAttributeOutput, error) + DescribeVolumeAttributeWithContext(aws.Context, *ec2.DescribeVolumeAttributeInput, ...request.Option) (*ec2.DescribeVolumeAttributeOutput, error) + DescribeVolumeAttributeRequest(*ec2.DescribeVolumeAttributeInput) 
(*request.Request, *ec2.DescribeVolumeAttributeOutput) + + DescribeVolumeStatus(*ec2.DescribeVolumeStatusInput) (*ec2.DescribeVolumeStatusOutput, error) + DescribeVolumeStatusWithContext(aws.Context, *ec2.DescribeVolumeStatusInput, ...request.Option) (*ec2.DescribeVolumeStatusOutput, error) + DescribeVolumeStatusRequest(*ec2.DescribeVolumeStatusInput) (*request.Request, *ec2.DescribeVolumeStatusOutput) + + DescribeVolumeStatusPages(*ec2.DescribeVolumeStatusInput, func(*ec2.DescribeVolumeStatusOutput, bool) bool) error + DescribeVolumeStatusPagesWithContext(aws.Context, *ec2.DescribeVolumeStatusInput, func(*ec2.DescribeVolumeStatusOutput, bool) bool, ...request.Option) error + + DescribeVolumes(*ec2.DescribeVolumesInput) (*ec2.DescribeVolumesOutput, error) + DescribeVolumesWithContext(aws.Context, *ec2.DescribeVolumesInput, ...request.Option) (*ec2.DescribeVolumesOutput, error) + DescribeVolumesRequest(*ec2.DescribeVolumesInput) (*request.Request, *ec2.DescribeVolumesOutput) + + DescribeVolumesPages(*ec2.DescribeVolumesInput, func(*ec2.DescribeVolumesOutput, bool) bool) error + DescribeVolumesPagesWithContext(aws.Context, *ec2.DescribeVolumesInput, func(*ec2.DescribeVolumesOutput, bool) bool, ...request.Option) error + + DescribeVolumesModifications(*ec2.DescribeVolumesModificationsInput) (*ec2.DescribeVolumesModificationsOutput, error) + DescribeVolumesModificationsWithContext(aws.Context, *ec2.DescribeVolumesModificationsInput, ...request.Option) (*ec2.DescribeVolumesModificationsOutput, error) + DescribeVolumesModificationsRequest(*ec2.DescribeVolumesModificationsInput) (*request.Request, *ec2.DescribeVolumesModificationsOutput) + + DescribeVolumesModificationsPages(*ec2.DescribeVolumesModificationsInput, func(*ec2.DescribeVolumesModificationsOutput, bool) bool) error + DescribeVolumesModificationsPagesWithContext(aws.Context, *ec2.DescribeVolumesModificationsInput, func(*ec2.DescribeVolumesModificationsOutput, bool) bool, ...request.Option) error + + DescribeVpcAttribute(*ec2.DescribeVpcAttributeInput) (*ec2.DescribeVpcAttributeOutput, error) + DescribeVpcAttributeWithContext(aws.Context, *ec2.DescribeVpcAttributeInput, ...request.Option) (*ec2.DescribeVpcAttributeOutput, error) + DescribeVpcAttributeRequest(*ec2.DescribeVpcAttributeInput) (*request.Request, *ec2.DescribeVpcAttributeOutput) + + DescribeVpcClassicLink(*ec2.DescribeVpcClassicLinkInput) (*ec2.DescribeVpcClassicLinkOutput, error) + DescribeVpcClassicLinkWithContext(aws.Context, *ec2.DescribeVpcClassicLinkInput, ...request.Option) (*ec2.DescribeVpcClassicLinkOutput, error) + DescribeVpcClassicLinkRequest(*ec2.DescribeVpcClassicLinkInput) (*request.Request, *ec2.DescribeVpcClassicLinkOutput) + + DescribeVpcClassicLinkDnsSupport(*ec2.DescribeVpcClassicLinkDnsSupportInput) (*ec2.DescribeVpcClassicLinkDnsSupportOutput, error) + DescribeVpcClassicLinkDnsSupportWithContext(aws.Context, *ec2.DescribeVpcClassicLinkDnsSupportInput, ...request.Option) (*ec2.DescribeVpcClassicLinkDnsSupportOutput, error) + DescribeVpcClassicLinkDnsSupportRequest(*ec2.DescribeVpcClassicLinkDnsSupportInput) (*request.Request, *ec2.DescribeVpcClassicLinkDnsSupportOutput) + + DescribeVpcClassicLinkDnsSupportPages(*ec2.DescribeVpcClassicLinkDnsSupportInput, func(*ec2.DescribeVpcClassicLinkDnsSupportOutput, bool) bool) error + DescribeVpcClassicLinkDnsSupportPagesWithContext(aws.Context, *ec2.DescribeVpcClassicLinkDnsSupportInput, func(*ec2.DescribeVpcClassicLinkDnsSupportOutput, bool) bool, ...request.Option) error + + 
DescribeVpcEndpointConnectionNotifications(*ec2.DescribeVpcEndpointConnectionNotificationsInput) (*ec2.DescribeVpcEndpointConnectionNotificationsOutput, error) + DescribeVpcEndpointConnectionNotificationsWithContext(aws.Context, *ec2.DescribeVpcEndpointConnectionNotificationsInput, ...request.Option) (*ec2.DescribeVpcEndpointConnectionNotificationsOutput, error) + DescribeVpcEndpointConnectionNotificationsRequest(*ec2.DescribeVpcEndpointConnectionNotificationsInput) (*request.Request, *ec2.DescribeVpcEndpointConnectionNotificationsOutput) + + DescribeVpcEndpointConnectionNotificationsPages(*ec2.DescribeVpcEndpointConnectionNotificationsInput, func(*ec2.DescribeVpcEndpointConnectionNotificationsOutput, bool) bool) error + DescribeVpcEndpointConnectionNotificationsPagesWithContext(aws.Context, *ec2.DescribeVpcEndpointConnectionNotificationsInput, func(*ec2.DescribeVpcEndpointConnectionNotificationsOutput, bool) bool, ...request.Option) error + + DescribeVpcEndpointConnections(*ec2.DescribeVpcEndpointConnectionsInput) (*ec2.DescribeVpcEndpointConnectionsOutput, error) + DescribeVpcEndpointConnectionsWithContext(aws.Context, *ec2.DescribeVpcEndpointConnectionsInput, ...request.Option) (*ec2.DescribeVpcEndpointConnectionsOutput, error) + DescribeVpcEndpointConnectionsRequest(*ec2.DescribeVpcEndpointConnectionsInput) (*request.Request, *ec2.DescribeVpcEndpointConnectionsOutput) + + DescribeVpcEndpointConnectionsPages(*ec2.DescribeVpcEndpointConnectionsInput, func(*ec2.DescribeVpcEndpointConnectionsOutput, bool) bool) error + DescribeVpcEndpointConnectionsPagesWithContext(aws.Context, *ec2.DescribeVpcEndpointConnectionsInput, func(*ec2.DescribeVpcEndpointConnectionsOutput, bool) bool, ...request.Option) error + + DescribeVpcEndpointServiceConfigurations(*ec2.DescribeVpcEndpointServiceConfigurationsInput) (*ec2.DescribeVpcEndpointServiceConfigurationsOutput, error) + DescribeVpcEndpointServiceConfigurationsWithContext(aws.Context, *ec2.DescribeVpcEndpointServiceConfigurationsInput, ...request.Option) (*ec2.DescribeVpcEndpointServiceConfigurationsOutput, error) + DescribeVpcEndpointServiceConfigurationsRequest(*ec2.DescribeVpcEndpointServiceConfigurationsInput) (*request.Request, *ec2.DescribeVpcEndpointServiceConfigurationsOutput) + + DescribeVpcEndpointServiceConfigurationsPages(*ec2.DescribeVpcEndpointServiceConfigurationsInput, func(*ec2.DescribeVpcEndpointServiceConfigurationsOutput, bool) bool) error + DescribeVpcEndpointServiceConfigurationsPagesWithContext(aws.Context, *ec2.DescribeVpcEndpointServiceConfigurationsInput, func(*ec2.DescribeVpcEndpointServiceConfigurationsOutput, bool) bool, ...request.Option) error + + DescribeVpcEndpointServicePermissions(*ec2.DescribeVpcEndpointServicePermissionsInput) (*ec2.DescribeVpcEndpointServicePermissionsOutput, error) + DescribeVpcEndpointServicePermissionsWithContext(aws.Context, *ec2.DescribeVpcEndpointServicePermissionsInput, ...request.Option) (*ec2.DescribeVpcEndpointServicePermissionsOutput, error) + DescribeVpcEndpointServicePermissionsRequest(*ec2.DescribeVpcEndpointServicePermissionsInput) (*request.Request, *ec2.DescribeVpcEndpointServicePermissionsOutput) + + DescribeVpcEndpointServicePermissionsPages(*ec2.DescribeVpcEndpointServicePermissionsInput, func(*ec2.DescribeVpcEndpointServicePermissionsOutput, bool) bool) error + DescribeVpcEndpointServicePermissionsPagesWithContext(aws.Context, *ec2.DescribeVpcEndpointServicePermissionsInput, func(*ec2.DescribeVpcEndpointServicePermissionsOutput, bool) bool, ...request.Option) error + + 
DescribeVpcEndpointServices(*ec2.DescribeVpcEndpointServicesInput) (*ec2.DescribeVpcEndpointServicesOutput, error) + DescribeVpcEndpointServicesWithContext(aws.Context, *ec2.DescribeVpcEndpointServicesInput, ...request.Option) (*ec2.DescribeVpcEndpointServicesOutput, error) + DescribeVpcEndpointServicesRequest(*ec2.DescribeVpcEndpointServicesInput) (*request.Request, *ec2.DescribeVpcEndpointServicesOutput) + + DescribeVpcEndpoints(*ec2.DescribeVpcEndpointsInput) (*ec2.DescribeVpcEndpointsOutput, error) + DescribeVpcEndpointsWithContext(aws.Context, *ec2.DescribeVpcEndpointsInput, ...request.Option) (*ec2.DescribeVpcEndpointsOutput, error) + DescribeVpcEndpointsRequest(*ec2.DescribeVpcEndpointsInput) (*request.Request, *ec2.DescribeVpcEndpointsOutput) + + DescribeVpcEndpointsPages(*ec2.DescribeVpcEndpointsInput, func(*ec2.DescribeVpcEndpointsOutput, bool) bool) error + DescribeVpcEndpointsPagesWithContext(aws.Context, *ec2.DescribeVpcEndpointsInput, func(*ec2.DescribeVpcEndpointsOutput, bool) bool, ...request.Option) error + + DescribeVpcPeeringConnections(*ec2.DescribeVpcPeeringConnectionsInput) (*ec2.DescribeVpcPeeringConnectionsOutput, error) + DescribeVpcPeeringConnectionsWithContext(aws.Context, *ec2.DescribeVpcPeeringConnectionsInput, ...request.Option) (*ec2.DescribeVpcPeeringConnectionsOutput, error) + DescribeVpcPeeringConnectionsRequest(*ec2.DescribeVpcPeeringConnectionsInput) (*request.Request, *ec2.DescribeVpcPeeringConnectionsOutput) + + DescribeVpcPeeringConnectionsPages(*ec2.DescribeVpcPeeringConnectionsInput, func(*ec2.DescribeVpcPeeringConnectionsOutput, bool) bool) error + DescribeVpcPeeringConnectionsPagesWithContext(aws.Context, *ec2.DescribeVpcPeeringConnectionsInput, func(*ec2.DescribeVpcPeeringConnectionsOutput, bool) bool, ...request.Option) error + + DescribeVpcs(*ec2.DescribeVpcsInput) (*ec2.DescribeVpcsOutput, error) + DescribeVpcsWithContext(aws.Context, *ec2.DescribeVpcsInput, ...request.Option) (*ec2.DescribeVpcsOutput, error) + DescribeVpcsRequest(*ec2.DescribeVpcsInput) (*request.Request, *ec2.DescribeVpcsOutput) + + DescribeVpcsPages(*ec2.DescribeVpcsInput, func(*ec2.DescribeVpcsOutput, bool) bool) error + DescribeVpcsPagesWithContext(aws.Context, *ec2.DescribeVpcsInput, func(*ec2.DescribeVpcsOutput, bool) bool, ...request.Option) error + + DescribeVpnConnections(*ec2.DescribeVpnConnectionsInput) (*ec2.DescribeVpnConnectionsOutput, error) + DescribeVpnConnectionsWithContext(aws.Context, *ec2.DescribeVpnConnectionsInput, ...request.Option) (*ec2.DescribeVpnConnectionsOutput, error) + DescribeVpnConnectionsRequest(*ec2.DescribeVpnConnectionsInput) (*request.Request, *ec2.DescribeVpnConnectionsOutput) + + DescribeVpnGateways(*ec2.DescribeVpnGatewaysInput) (*ec2.DescribeVpnGatewaysOutput, error) + DescribeVpnGatewaysWithContext(aws.Context, *ec2.DescribeVpnGatewaysInput, ...request.Option) (*ec2.DescribeVpnGatewaysOutput, error) + DescribeVpnGatewaysRequest(*ec2.DescribeVpnGatewaysInput) (*request.Request, *ec2.DescribeVpnGatewaysOutput) + + DetachClassicLinkVpc(*ec2.DetachClassicLinkVpcInput) (*ec2.DetachClassicLinkVpcOutput, error) + DetachClassicLinkVpcWithContext(aws.Context, *ec2.DetachClassicLinkVpcInput, ...request.Option) (*ec2.DetachClassicLinkVpcOutput, error) + DetachClassicLinkVpcRequest(*ec2.DetachClassicLinkVpcInput) (*request.Request, *ec2.DetachClassicLinkVpcOutput) + + DetachInternetGateway(*ec2.DetachInternetGatewayInput) (*ec2.DetachInternetGatewayOutput, error) + DetachInternetGatewayWithContext(aws.Context, *ec2.DetachInternetGatewayInput, 
...request.Option) (*ec2.DetachInternetGatewayOutput, error) + DetachInternetGatewayRequest(*ec2.DetachInternetGatewayInput) (*request.Request, *ec2.DetachInternetGatewayOutput) + + DetachNetworkInterface(*ec2.DetachNetworkInterfaceInput) (*ec2.DetachNetworkInterfaceOutput, error) + DetachNetworkInterfaceWithContext(aws.Context, *ec2.DetachNetworkInterfaceInput, ...request.Option) (*ec2.DetachNetworkInterfaceOutput, error) + DetachNetworkInterfaceRequest(*ec2.DetachNetworkInterfaceInput) (*request.Request, *ec2.DetachNetworkInterfaceOutput) + + DetachVerifiedAccessTrustProvider(*ec2.DetachVerifiedAccessTrustProviderInput) (*ec2.DetachVerifiedAccessTrustProviderOutput, error) + DetachVerifiedAccessTrustProviderWithContext(aws.Context, *ec2.DetachVerifiedAccessTrustProviderInput, ...request.Option) (*ec2.DetachVerifiedAccessTrustProviderOutput, error) + DetachVerifiedAccessTrustProviderRequest(*ec2.DetachVerifiedAccessTrustProviderInput) (*request.Request, *ec2.DetachVerifiedAccessTrustProviderOutput) + + DetachVolume(*ec2.DetachVolumeInput) (*ec2.VolumeAttachment, error) + DetachVolumeWithContext(aws.Context, *ec2.DetachVolumeInput, ...request.Option) (*ec2.VolumeAttachment, error) + DetachVolumeRequest(*ec2.DetachVolumeInput) (*request.Request, *ec2.VolumeAttachment) + + DetachVpnGateway(*ec2.DetachVpnGatewayInput) (*ec2.DetachVpnGatewayOutput, error) + DetachVpnGatewayWithContext(aws.Context, *ec2.DetachVpnGatewayInput, ...request.Option) (*ec2.DetachVpnGatewayOutput, error) + DetachVpnGatewayRequest(*ec2.DetachVpnGatewayInput) (*request.Request, *ec2.DetachVpnGatewayOutput) + + DisableAddressTransfer(*ec2.DisableAddressTransferInput) (*ec2.DisableAddressTransferOutput, error) + DisableAddressTransferWithContext(aws.Context, *ec2.DisableAddressTransferInput, ...request.Option) (*ec2.DisableAddressTransferOutput, error) + DisableAddressTransferRequest(*ec2.DisableAddressTransferInput) (*request.Request, *ec2.DisableAddressTransferOutput) + + DisableAwsNetworkPerformanceMetricSubscription(*ec2.DisableAwsNetworkPerformanceMetricSubscriptionInput) (*ec2.DisableAwsNetworkPerformanceMetricSubscriptionOutput, error) + DisableAwsNetworkPerformanceMetricSubscriptionWithContext(aws.Context, *ec2.DisableAwsNetworkPerformanceMetricSubscriptionInput, ...request.Option) (*ec2.DisableAwsNetworkPerformanceMetricSubscriptionOutput, error) + DisableAwsNetworkPerformanceMetricSubscriptionRequest(*ec2.DisableAwsNetworkPerformanceMetricSubscriptionInput) (*request.Request, *ec2.DisableAwsNetworkPerformanceMetricSubscriptionOutput) + + DisableEbsEncryptionByDefault(*ec2.DisableEbsEncryptionByDefaultInput) (*ec2.DisableEbsEncryptionByDefaultOutput, error) + DisableEbsEncryptionByDefaultWithContext(aws.Context, *ec2.DisableEbsEncryptionByDefaultInput, ...request.Option) (*ec2.DisableEbsEncryptionByDefaultOutput, error) + DisableEbsEncryptionByDefaultRequest(*ec2.DisableEbsEncryptionByDefaultInput) (*request.Request, *ec2.DisableEbsEncryptionByDefaultOutput) + + DisableFastLaunch(*ec2.DisableFastLaunchInput) (*ec2.DisableFastLaunchOutput, error) + DisableFastLaunchWithContext(aws.Context, *ec2.DisableFastLaunchInput, ...request.Option) (*ec2.DisableFastLaunchOutput, error) + DisableFastLaunchRequest(*ec2.DisableFastLaunchInput) (*request.Request, *ec2.DisableFastLaunchOutput) + + DisableFastSnapshotRestores(*ec2.DisableFastSnapshotRestoresInput) (*ec2.DisableFastSnapshotRestoresOutput, error) + DisableFastSnapshotRestoresWithContext(aws.Context, *ec2.DisableFastSnapshotRestoresInput, ...request.Option) 
(*ec2.DisableFastSnapshotRestoresOutput, error) + DisableFastSnapshotRestoresRequest(*ec2.DisableFastSnapshotRestoresInput) (*request.Request, *ec2.DisableFastSnapshotRestoresOutput) + + DisableImage(*ec2.DisableImageInput) (*ec2.DisableImageOutput, error) + DisableImageWithContext(aws.Context, *ec2.DisableImageInput, ...request.Option) (*ec2.DisableImageOutput, error) + DisableImageRequest(*ec2.DisableImageInput) (*request.Request, *ec2.DisableImageOutput) + + DisableImageBlockPublicAccess(*ec2.DisableImageBlockPublicAccessInput) (*ec2.DisableImageBlockPublicAccessOutput, error) + DisableImageBlockPublicAccessWithContext(aws.Context, *ec2.DisableImageBlockPublicAccessInput, ...request.Option) (*ec2.DisableImageBlockPublicAccessOutput, error) + DisableImageBlockPublicAccessRequest(*ec2.DisableImageBlockPublicAccessInput) (*request.Request, *ec2.DisableImageBlockPublicAccessOutput) + + DisableImageDeprecation(*ec2.DisableImageDeprecationInput) (*ec2.DisableImageDeprecationOutput, error) + DisableImageDeprecationWithContext(aws.Context, *ec2.DisableImageDeprecationInput, ...request.Option) (*ec2.DisableImageDeprecationOutput, error) + DisableImageDeprecationRequest(*ec2.DisableImageDeprecationInput) (*request.Request, *ec2.DisableImageDeprecationOutput) + + DisableImageDeregistrationProtection(*ec2.DisableImageDeregistrationProtectionInput) (*ec2.DisableImageDeregistrationProtectionOutput, error) + DisableImageDeregistrationProtectionWithContext(aws.Context, *ec2.DisableImageDeregistrationProtectionInput, ...request.Option) (*ec2.DisableImageDeregistrationProtectionOutput, error) + DisableImageDeregistrationProtectionRequest(*ec2.DisableImageDeregistrationProtectionInput) (*request.Request, *ec2.DisableImageDeregistrationProtectionOutput) + + DisableIpamOrganizationAdminAccount(*ec2.DisableIpamOrganizationAdminAccountInput) (*ec2.DisableIpamOrganizationAdminAccountOutput, error) + DisableIpamOrganizationAdminAccountWithContext(aws.Context, *ec2.DisableIpamOrganizationAdminAccountInput, ...request.Option) (*ec2.DisableIpamOrganizationAdminAccountOutput, error) + DisableIpamOrganizationAdminAccountRequest(*ec2.DisableIpamOrganizationAdminAccountInput) (*request.Request, *ec2.DisableIpamOrganizationAdminAccountOutput) + + DisableSerialConsoleAccess(*ec2.DisableSerialConsoleAccessInput) (*ec2.DisableSerialConsoleAccessOutput, error) + DisableSerialConsoleAccessWithContext(aws.Context, *ec2.DisableSerialConsoleAccessInput, ...request.Option) (*ec2.DisableSerialConsoleAccessOutput, error) + DisableSerialConsoleAccessRequest(*ec2.DisableSerialConsoleAccessInput) (*request.Request, *ec2.DisableSerialConsoleAccessOutput) + + DisableSnapshotBlockPublicAccess(*ec2.DisableSnapshotBlockPublicAccessInput) (*ec2.DisableSnapshotBlockPublicAccessOutput, error) + DisableSnapshotBlockPublicAccessWithContext(aws.Context, *ec2.DisableSnapshotBlockPublicAccessInput, ...request.Option) (*ec2.DisableSnapshotBlockPublicAccessOutput, error) + DisableSnapshotBlockPublicAccessRequest(*ec2.DisableSnapshotBlockPublicAccessInput) (*request.Request, *ec2.DisableSnapshotBlockPublicAccessOutput) + + DisableTransitGatewayRouteTablePropagation(*ec2.DisableTransitGatewayRouteTablePropagationInput) (*ec2.DisableTransitGatewayRouteTablePropagationOutput, error) + DisableTransitGatewayRouteTablePropagationWithContext(aws.Context, *ec2.DisableTransitGatewayRouteTablePropagationInput, ...request.Option) (*ec2.DisableTransitGatewayRouteTablePropagationOutput, error) + 
DisableTransitGatewayRouteTablePropagationRequest(*ec2.DisableTransitGatewayRouteTablePropagationInput) (*request.Request, *ec2.DisableTransitGatewayRouteTablePropagationOutput)
+
+	DisableVgwRoutePropagation(*ec2.DisableVgwRoutePropagationInput) (*ec2.DisableVgwRoutePropagationOutput, error)
+	DisableVgwRoutePropagationWithContext(aws.Context, *ec2.DisableVgwRoutePropagationInput, ...request.Option) (*ec2.DisableVgwRoutePropagationOutput, error)
+	DisableVgwRoutePropagationRequest(*ec2.DisableVgwRoutePropagationInput) (*request.Request, *ec2.DisableVgwRoutePropagationOutput)
+
+	DisableVpcClassicLink(*ec2.DisableVpcClassicLinkInput) (*ec2.DisableVpcClassicLinkOutput, error)
+	DisableVpcClassicLinkWithContext(aws.Context, *ec2.DisableVpcClassicLinkInput, ...request.Option) (*ec2.DisableVpcClassicLinkOutput, error)
+	DisableVpcClassicLinkRequest(*ec2.DisableVpcClassicLinkInput) (*request.Request, *ec2.DisableVpcClassicLinkOutput)
+
+	DisableVpcClassicLinkDnsSupport(*ec2.DisableVpcClassicLinkDnsSupportInput) (*ec2.DisableVpcClassicLinkDnsSupportOutput, error)
+	DisableVpcClassicLinkDnsSupportWithContext(aws.Context, *ec2.DisableVpcClassicLinkDnsSupportInput, ...request.Option) (*ec2.DisableVpcClassicLinkDnsSupportOutput, error)
+	DisableVpcClassicLinkDnsSupportRequest(*ec2.DisableVpcClassicLinkDnsSupportInput) (*request.Request, *ec2.DisableVpcClassicLinkDnsSupportOutput)
+
+	DisassociateAddress(*ec2.DisassociateAddressInput) (*ec2.DisassociateAddressOutput, error)
+	DisassociateAddressWithContext(aws.Context, *ec2.DisassociateAddressInput, ...request.Option) (*ec2.DisassociateAddressOutput, error)
+	DisassociateAddressRequest(*ec2.DisassociateAddressInput) (*request.Request, *ec2.DisassociateAddressOutput)
+
+	DisassociateClientVpnTargetNetwork(*ec2.DisassociateClientVpnTargetNetworkInput) (*ec2.DisassociateClientVpnTargetNetworkOutput, error)
+	DisassociateClientVpnTargetNetworkWithContext(aws.Context, *ec2.DisassociateClientVpnTargetNetworkInput, ...request.Option) (*ec2.DisassociateClientVpnTargetNetworkOutput, error)
+	DisassociateClientVpnTargetNetworkRequest(*ec2.DisassociateClientVpnTargetNetworkInput) (*request.Request, *ec2.DisassociateClientVpnTargetNetworkOutput)
+
+	DisassociateEnclaveCertificateIamRole(*ec2.DisassociateEnclaveCertificateIamRoleInput) (*ec2.DisassociateEnclaveCertificateIamRoleOutput, error)
+	DisassociateEnclaveCertificateIamRoleWithContext(aws.Context, *ec2.DisassociateEnclaveCertificateIamRoleInput, ...request.Option) (*ec2.DisassociateEnclaveCertificateIamRoleOutput, error)
+	DisassociateEnclaveCertificateIamRoleRequest(*ec2.DisassociateEnclaveCertificateIamRoleInput) (*request.Request, *ec2.DisassociateEnclaveCertificateIamRoleOutput)
+
+	DisassociateIamInstanceProfile(*ec2.DisassociateIamInstanceProfileInput) (*ec2.DisassociateIamInstanceProfileOutput, error)
+	DisassociateIamInstanceProfileWithContext(aws.Context, *ec2.DisassociateIamInstanceProfileInput, ...request.Option) (*ec2.DisassociateIamInstanceProfileOutput, error)
+	DisassociateIamInstanceProfileRequest(*ec2.DisassociateIamInstanceProfileInput) (*request.Request, *ec2.DisassociateIamInstanceProfileOutput)
+
+	DisassociateInstanceEventWindow(*ec2.DisassociateInstanceEventWindowInput) (*ec2.DisassociateInstanceEventWindowOutput, error)
+	DisassociateInstanceEventWindowWithContext(aws.Context, *ec2.DisassociateInstanceEventWindowInput, ...request.Option) (*ec2.DisassociateInstanceEventWindowOutput, error)
+	DisassociateInstanceEventWindowRequest(*ec2.DisassociateInstanceEventWindowInput) (*request.Request, *ec2.DisassociateInstanceEventWindowOutput)
+
+	DisassociateIpamByoasn(*ec2.DisassociateIpamByoasnInput) (*ec2.DisassociateIpamByoasnOutput, error)
+	DisassociateIpamByoasnWithContext(aws.Context, *ec2.DisassociateIpamByoasnInput, ...request.Option) (*ec2.DisassociateIpamByoasnOutput, error)
+	DisassociateIpamByoasnRequest(*ec2.DisassociateIpamByoasnInput) (*request.Request, *ec2.DisassociateIpamByoasnOutput)
+
+	DisassociateIpamResourceDiscovery(*ec2.DisassociateIpamResourceDiscoveryInput) (*ec2.DisassociateIpamResourceDiscoveryOutput, error)
+	DisassociateIpamResourceDiscoveryWithContext(aws.Context, *ec2.DisassociateIpamResourceDiscoveryInput, ...request.Option) (*ec2.DisassociateIpamResourceDiscoveryOutput, error)
+	DisassociateIpamResourceDiscoveryRequest(*ec2.DisassociateIpamResourceDiscoveryInput) (*request.Request, *ec2.DisassociateIpamResourceDiscoveryOutput)
+
+	DisassociateNatGatewayAddress(*ec2.DisassociateNatGatewayAddressInput) (*ec2.DisassociateNatGatewayAddressOutput, error)
+	DisassociateNatGatewayAddressWithContext(aws.Context, *ec2.DisassociateNatGatewayAddressInput, ...request.Option) (*ec2.DisassociateNatGatewayAddressOutput, error)
+	DisassociateNatGatewayAddressRequest(*ec2.DisassociateNatGatewayAddressInput) (*request.Request, *ec2.DisassociateNatGatewayAddressOutput)
+
+	DisassociateRouteTable(*ec2.DisassociateRouteTableInput) (*ec2.DisassociateRouteTableOutput, error)
+	DisassociateRouteTableWithContext(aws.Context, *ec2.DisassociateRouteTableInput, ...request.Option) (*ec2.DisassociateRouteTableOutput, error)
+	DisassociateRouteTableRequest(*ec2.DisassociateRouteTableInput) (*request.Request, *ec2.DisassociateRouteTableOutput)
+
+	DisassociateSubnetCidrBlock(*ec2.DisassociateSubnetCidrBlockInput) (*ec2.DisassociateSubnetCidrBlockOutput, error)
+	DisassociateSubnetCidrBlockWithContext(aws.Context, *ec2.DisassociateSubnetCidrBlockInput, ...request.Option) (*ec2.DisassociateSubnetCidrBlockOutput, error)
+	DisassociateSubnetCidrBlockRequest(*ec2.DisassociateSubnetCidrBlockInput) (*request.Request, *ec2.DisassociateSubnetCidrBlockOutput)
+
+	DisassociateTransitGatewayMulticastDomain(*ec2.DisassociateTransitGatewayMulticastDomainInput) (*ec2.DisassociateTransitGatewayMulticastDomainOutput, error)
+	DisassociateTransitGatewayMulticastDomainWithContext(aws.Context, *ec2.DisassociateTransitGatewayMulticastDomainInput, ...request.Option) (*ec2.DisassociateTransitGatewayMulticastDomainOutput, error)
+	DisassociateTransitGatewayMulticastDomainRequest(*ec2.DisassociateTransitGatewayMulticastDomainInput) (*request.Request, *ec2.DisassociateTransitGatewayMulticastDomainOutput)
+
+	DisassociateTransitGatewayPolicyTable(*ec2.DisassociateTransitGatewayPolicyTableInput) (*ec2.DisassociateTransitGatewayPolicyTableOutput, error)
+	DisassociateTransitGatewayPolicyTableWithContext(aws.Context, *ec2.DisassociateTransitGatewayPolicyTableInput, ...request.Option) (*ec2.DisassociateTransitGatewayPolicyTableOutput, error)
+	DisassociateTransitGatewayPolicyTableRequest(*ec2.DisassociateTransitGatewayPolicyTableInput) (*request.Request, *ec2.DisassociateTransitGatewayPolicyTableOutput)
+
+	DisassociateTransitGatewayRouteTable(*ec2.DisassociateTransitGatewayRouteTableInput) (*ec2.DisassociateTransitGatewayRouteTableOutput, error)
+	DisassociateTransitGatewayRouteTableWithContext(aws.Context, *ec2.DisassociateTransitGatewayRouteTableInput, ...request.Option) (*ec2.DisassociateTransitGatewayRouteTableOutput, error)
+	DisassociateTransitGatewayRouteTableRequest(*ec2.DisassociateTransitGatewayRouteTableInput) (*request.Request, *ec2.DisassociateTransitGatewayRouteTableOutput)
+
+	DisassociateTrunkInterface(*ec2.DisassociateTrunkInterfaceInput) (*ec2.DisassociateTrunkInterfaceOutput, error)
+	DisassociateTrunkInterfaceWithContext(aws.Context, *ec2.DisassociateTrunkInterfaceInput, ...request.Option) (*ec2.DisassociateTrunkInterfaceOutput, error)
+	DisassociateTrunkInterfaceRequest(*ec2.DisassociateTrunkInterfaceInput) (*request.Request, *ec2.DisassociateTrunkInterfaceOutput)
+
+	DisassociateVpcCidrBlock(*ec2.DisassociateVpcCidrBlockInput) (*ec2.DisassociateVpcCidrBlockOutput, error)
+	DisassociateVpcCidrBlockWithContext(aws.Context, *ec2.DisassociateVpcCidrBlockInput, ...request.Option) (*ec2.DisassociateVpcCidrBlockOutput, error)
+	DisassociateVpcCidrBlockRequest(*ec2.DisassociateVpcCidrBlockInput) (*request.Request, *ec2.DisassociateVpcCidrBlockOutput)
+
+	EnableAddressTransfer(*ec2.EnableAddressTransferInput) (*ec2.EnableAddressTransferOutput, error)
+	EnableAddressTransferWithContext(aws.Context, *ec2.EnableAddressTransferInput, ...request.Option) (*ec2.EnableAddressTransferOutput, error)
+	EnableAddressTransferRequest(*ec2.EnableAddressTransferInput) (*request.Request, *ec2.EnableAddressTransferOutput)
+
+	EnableAwsNetworkPerformanceMetricSubscription(*ec2.EnableAwsNetworkPerformanceMetricSubscriptionInput) (*ec2.EnableAwsNetworkPerformanceMetricSubscriptionOutput, error)
+	EnableAwsNetworkPerformanceMetricSubscriptionWithContext(aws.Context, *ec2.EnableAwsNetworkPerformanceMetricSubscriptionInput, ...request.Option) (*ec2.EnableAwsNetworkPerformanceMetricSubscriptionOutput, error)
+	EnableAwsNetworkPerformanceMetricSubscriptionRequest(*ec2.EnableAwsNetworkPerformanceMetricSubscriptionInput) (*request.Request, *ec2.EnableAwsNetworkPerformanceMetricSubscriptionOutput)
+
+	EnableEbsEncryptionByDefault(*ec2.EnableEbsEncryptionByDefaultInput) (*ec2.EnableEbsEncryptionByDefaultOutput, error)
+	EnableEbsEncryptionByDefaultWithContext(aws.Context, *ec2.EnableEbsEncryptionByDefaultInput, ...request.Option) (*ec2.EnableEbsEncryptionByDefaultOutput, error)
+	EnableEbsEncryptionByDefaultRequest(*ec2.EnableEbsEncryptionByDefaultInput) (*request.Request, *ec2.EnableEbsEncryptionByDefaultOutput)
+
+	EnableFastLaunch(*ec2.EnableFastLaunchInput) (*ec2.EnableFastLaunchOutput, error)
+	EnableFastLaunchWithContext(aws.Context, *ec2.EnableFastLaunchInput, ...request.Option) (*ec2.EnableFastLaunchOutput, error)
+	EnableFastLaunchRequest(*ec2.EnableFastLaunchInput) (*request.Request, *ec2.EnableFastLaunchOutput)
+
+	EnableFastSnapshotRestores(*ec2.EnableFastSnapshotRestoresInput) (*ec2.EnableFastSnapshotRestoresOutput, error)
+	EnableFastSnapshotRestoresWithContext(aws.Context, *ec2.EnableFastSnapshotRestoresInput, ...request.Option) (*ec2.EnableFastSnapshotRestoresOutput, error)
+	EnableFastSnapshotRestoresRequest(*ec2.EnableFastSnapshotRestoresInput) (*request.Request, *ec2.EnableFastSnapshotRestoresOutput)
+
+	EnableImage(*ec2.EnableImageInput) (*ec2.EnableImageOutput, error)
+	EnableImageWithContext(aws.Context, *ec2.EnableImageInput, ...request.Option) (*ec2.EnableImageOutput, error)
+	EnableImageRequest(*ec2.EnableImageInput) (*request.Request, *ec2.EnableImageOutput)
+
+	EnableImageBlockPublicAccess(*ec2.EnableImageBlockPublicAccessInput) (*ec2.EnableImageBlockPublicAccessOutput, error)
+	EnableImageBlockPublicAccessWithContext(aws.Context, *ec2.EnableImageBlockPublicAccessInput, ...request.Option) (*ec2.EnableImageBlockPublicAccessOutput, error)
+	EnableImageBlockPublicAccessRequest(*ec2.EnableImageBlockPublicAccessInput) (*request.Request, *ec2.EnableImageBlockPublicAccessOutput)
+
+	EnableImageDeprecation(*ec2.EnableImageDeprecationInput) (*ec2.EnableImageDeprecationOutput, error)
+	EnableImageDeprecationWithContext(aws.Context, *ec2.EnableImageDeprecationInput, ...request.Option) (*ec2.EnableImageDeprecationOutput, error)
+	EnableImageDeprecationRequest(*ec2.EnableImageDeprecationInput) (*request.Request, *ec2.EnableImageDeprecationOutput)
+
+	EnableImageDeregistrationProtection(*ec2.EnableImageDeregistrationProtectionInput) (*ec2.EnableImageDeregistrationProtectionOutput, error)
+	EnableImageDeregistrationProtectionWithContext(aws.Context, *ec2.EnableImageDeregistrationProtectionInput, ...request.Option) (*ec2.EnableImageDeregistrationProtectionOutput, error)
+	EnableImageDeregistrationProtectionRequest(*ec2.EnableImageDeregistrationProtectionInput) (*request.Request, *ec2.EnableImageDeregistrationProtectionOutput)
+
+	EnableIpamOrganizationAdminAccount(*ec2.EnableIpamOrganizationAdminAccountInput) (*ec2.EnableIpamOrganizationAdminAccountOutput, error)
+	EnableIpamOrganizationAdminAccountWithContext(aws.Context, *ec2.EnableIpamOrganizationAdminAccountInput, ...request.Option) (*ec2.EnableIpamOrganizationAdminAccountOutput, error)
+	EnableIpamOrganizationAdminAccountRequest(*ec2.EnableIpamOrganizationAdminAccountInput) (*request.Request, *ec2.EnableIpamOrganizationAdminAccountOutput)
+
+	EnableReachabilityAnalyzerOrganizationSharing(*ec2.EnableReachabilityAnalyzerOrganizationSharingInput) (*ec2.EnableReachabilityAnalyzerOrganizationSharingOutput, error)
+	EnableReachabilityAnalyzerOrganizationSharingWithContext(aws.Context, *ec2.EnableReachabilityAnalyzerOrganizationSharingInput, ...request.Option) (*ec2.EnableReachabilityAnalyzerOrganizationSharingOutput, error)
+	EnableReachabilityAnalyzerOrganizationSharingRequest(*ec2.EnableReachabilityAnalyzerOrganizationSharingInput) (*request.Request, *ec2.EnableReachabilityAnalyzerOrganizationSharingOutput)
+
+	EnableSerialConsoleAccess(*ec2.EnableSerialConsoleAccessInput) (*ec2.EnableSerialConsoleAccessOutput, error)
+	EnableSerialConsoleAccessWithContext(aws.Context, *ec2.EnableSerialConsoleAccessInput, ...request.Option) (*ec2.EnableSerialConsoleAccessOutput, error)
+	EnableSerialConsoleAccessRequest(*ec2.EnableSerialConsoleAccessInput) (*request.Request, *ec2.EnableSerialConsoleAccessOutput)
+
+	EnableSnapshotBlockPublicAccess(*ec2.EnableSnapshotBlockPublicAccessInput) (*ec2.EnableSnapshotBlockPublicAccessOutput, error)
+	EnableSnapshotBlockPublicAccessWithContext(aws.Context, *ec2.EnableSnapshotBlockPublicAccessInput, ...request.Option) (*ec2.EnableSnapshotBlockPublicAccessOutput, error)
+	EnableSnapshotBlockPublicAccessRequest(*ec2.EnableSnapshotBlockPublicAccessInput) (*request.Request, *ec2.EnableSnapshotBlockPublicAccessOutput)
+
+	EnableTransitGatewayRouteTablePropagation(*ec2.EnableTransitGatewayRouteTablePropagationInput) (*ec2.EnableTransitGatewayRouteTablePropagationOutput, error)
+	EnableTransitGatewayRouteTablePropagationWithContext(aws.Context, *ec2.EnableTransitGatewayRouteTablePropagationInput, ...request.Option) (*ec2.EnableTransitGatewayRouteTablePropagationOutput, error)
+	EnableTransitGatewayRouteTablePropagationRequest(*ec2.EnableTransitGatewayRouteTablePropagationInput) (*request.Request, *ec2.EnableTransitGatewayRouteTablePropagationOutput)
+
+	EnableVgwRoutePropagation(*ec2.EnableVgwRoutePropagationInput) (*ec2.EnableVgwRoutePropagationOutput, error)
+	EnableVgwRoutePropagationWithContext(aws.Context, *ec2.EnableVgwRoutePropagationInput, ...request.Option) (*ec2.EnableVgwRoutePropagationOutput, error)
+	EnableVgwRoutePropagationRequest(*ec2.EnableVgwRoutePropagationInput) (*request.Request, *ec2.EnableVgwRoutePropagationOutput)
+
+	EnableVolumeIO(*ec2.EnableVolumeIOInput) (*ec2.EnableVolumeIOOutput, error)
+	EnableVolumeIOWithContext(aws.Context, *ec2.EnableVolumeIOInput, ...request.Option) (*ec2.EnableVolumeIOOutput, error)
+	EnableVolumeIORequest(*ec2.EnableVolumeIOInput) (*request.Request, *ec2.EnableVolumeIOOutput)
+
+	EnableVpcClassicLink(*ec2.EnableVpcClassicLinkInput) (*ec2.EnableVpcClassicLinkOutput, error)
+	EnableVpcClassicLinkWithContext(aws.Context, *ec2.EnableVpcClassicLinkInput, ...request.Option) (*ec2.EnableVpcClassicLinkOutput, error)
+	EnableVpcClassicLinkRequest(*ec2.EnableVpcClassicLinkInput) (*request.Request, *ec2.EnableVpcClassicLinkOutput)
+
+	EnableVpcClassicLinkDnsSupport(*ec2.EnableVpcClassicLinkDnsSupportInput) (*ec2.EnableVpcClassicLinkDnsSupportOutput, error)
+	EnableVpcClassicLinkDnsSupportWithContext(aws.Context, *ec2.EnableVpcClassicLinkDnsSupportInput, ...request.Option) (*ec2.EnableVpcClassicLinkDnsSupportOutput, error)
+	EnableVpcClassicLinkDnsSupportRequest(*ec2.EnableVpcClassicLinkDnsSupportInput) (*request.Request, *ec2.EnableVpcClassicLinkDnsSupportOutput)
+
+	ExportClientVpnClientCertificateRevocationList(*ec2.ExportClientVpnClientCertificateRevocationListInput) (*ec2.ExportClientVpnClientCertificateRevocationListOutput, error)
+	ExportClientVpnClientCertificateRevocationListWithContext(aws.Context, *ec2.ExportClientVpnClientCertificateRevocationListInput, ...request.Option) (*ec2.ExportClientVpnClientCertificateRevocationListOutput, error)
+	ExportClientVpnClientCertificateRevocationListRequest(*ec2.ExportClientVpnClientCertificateRevocationListInput) (*request.Request, *ec2.ExportClientVpnClientCertificateRevocationListOutput)
+
+	ExportClientVpnClientConfiguration(*ec2.ExportClientVpnClientConfigurationInput) (*ec2.ExportClientVpnClientConfigurationOutput, error)
+	ExportClientVpnClientConfigurationWithContext(aws.Context, *ec2.ExportClientVpnClientConfigurationInput, ...request.Option) (*ec2.ExportClientVpnClientConfigurationOutput, error)
+	ExportClientVpnClientConfigurationRequest(*ec2.ExportClientVpnClientConfigurationInput) (*request.Request, *ec2.ExportClientVpnClientConfigurationOutput)
+
+	ExportImage(*ec2.ExportImageInput) (*ec2.ExportImageOutput, error)
+	ExportImageWithContext(aws.Context, *ec2.ExportImageInput, ...request.Option) (*ec2.ExportImageOutput, error)
+	ExportImageRequest(*ec2.ExportImageInput) (*request.Request, *ec2.ExportImageOutput)
+
+	ExportTransitGatewayRoutes(*ec2.ExportTransitGatewayRoutesInput) (*ec2.ExportTransitGatewayRoutesOutput, error)
+	ExportTransitGatewayRoutesWithContext(aws.Context, *ec2.ExportTransitGatewayRoutesInput, ...request.Option) (*ec2.ExportTransitGatewayRoutesOutput, error)
+	ExportTransitGatewayRoutesRequest(*ec2.ExportTransitGatewayRoutesInput) (*request.Request, *ec2.ExportTransitGatewayRoutesOutput)
+
+	GetAssociatedEnclaveCertificateIamRoles(*ec2.GetAssociatedEnclaveCertificateIamRolesInput) (*ec2.GetAssociatedEnclaveCertificateIamRolesOutput, error)
+	GetAssociatedEnclaveCertificateIamRolesWithContext(aws.Context, *ec2.GetAssociatedEnclaveCertificateIamRolesInput, ...request.Option) (*ec2.GetAssociatedEnclaveCertificateIamRolesOutput, error)
GetAssociatedEnclaveCertificateIamRolesRequest(*ec2.GetAssociatedEnclaveCertificateIamRolesInput) (*request.Request, *ec2.GetAssociatedEnclaveCertificateIamRolesOutput) + + GetAssociatedIpv6PoolCidrs(*ec2.GetAssociatedIpv6PoolCidrsInput) (*ec2.GetAssociatedIpv6PoolCidrsOutput, error) + GetAssociatedIpv6PoolCidrsWithContext(aws.Context, *ec2.GetAssociatedIpv6PoolCidrsInput, ...request.Option) (*ec2.GetAssociatedIpv6PoolCidrsOutput, error) + GetAssociatedIpv6PoolCidrsRequest(*ec2.GetAssociatedIpv6PoolCidrsInput) (*request.Request, *ec2.GetAssociatedIpv6PoolCidrsOutput) + + GetAssociatedIpv6PoolCidrsPages(*ec2.GetAssociatedIpv6PoolCidrsInput, func(*ec2.GetAssociatedIpv6PoolCidrsOutput, bool) bool) error + GetAssociatedIpv6PoolCidrsPagesWithContext(aws.Context, *ec2.GetAssociatedIpv6PoolCidrsInput, func(*ec2.GetAssociatedIpv6PoolCidrsOutput, bool) bool, ...request.Option) error + + GetAwsNetworkPerformanceData(*ec2.GetAwsNetworkPerformanceDataInput) (*ec2.GetAwsNetworkPerformanceDataOutput, error) + GetAwsNetworkPerformanceDataWithContext(aws.Context, *ec2.GetAwsNetworkPerformanceDataInput, ...request.Option) (*ec2.GetAwsNetworkPerformanceDataOutput, error) + GetAwsNetworkPerformanceDataRequest(*ec2.GetAwsNetworkPerformanceDataInput) (*request.Request, *ec2.GetAwsNetworkPerformanceDataOutput) + + GetAwsNetworkPerformanceDataPages(*ec2.GetAwsNetworkPerformanceDataInput, func(*ec2.GetAwsNetworkPerformanceDataOutput, bool) bool) error + GetAwsNetworkPerformanceDataPagesWithContext(aws.Context, *ec2.GetAwsNetworkPerformanceDataInput, func(*ec2.GetAwsNetworkPerformanceDataOutput, bool) bool, ...request.Option) error + + GetCapacityReservationUsage(*ec2.GetCapacityReservationUsageInput) (*ec2.GetCapacityReservationUsageOutput, error) + GetCapacityReservationUsageWithContext(aws.Context, *ec2.GetCapacityReservationUsageInput, ...request.Option) (*ec2.GetCapacityReservationUsageOutput, error) + GetCapacityReservationUsageRequest(*ec2.GetCapacityReservationUsageInput) (*request.Request, *ec2.GetCapacityReservationUsageOutput) + + GetCoipPoolUsage(*ec2.GetCoipPoolUsageInput) (*ec2.GetCoipPoolUsageOutput, error) + GetCoipPoolUsageWithContext(aws.Context, *ec2.GetCoipPoolUsageInput, ...request.Option) (*ec2.GetCoipPoolUsageOutput, error) + GetCoipPoolUsageRequest(*ec2.GetCoipPoolUsageInput) (*request.Request, *ec2.GetCoipPoolUsageOutput) + + GetConsoleOutput(*ec2.GetConsoleOutputInput) (*ec2.GetConsoleOutputOutput, error) + GetConsoleOutputWithContext(aws.Context, *ec2.GetConsoleOutputInput, ...request.Option) (*ec2.GetConsoleOutputOutput, error) + GetConsoleOutputRequest(*ec2.GetConsoleOutputInput) (*request.Request, *ec2.GetConsoleOutputOutput) + + GetConsoleScreenshot(*ec2.GetConsoleScreenshotInput) (*ec2.GetConsoleScreenshotOutput, error) + GetConsoleScreenshotWithContext(aws.Context, *ec2.GetConsoleScreenshotInput, ...request.Option) (*ec2.GetConsoleScreenshotOutput, error) + GetConsoleScreenshotRequest(*ec2.GetConsoleScreenshotInput) (*request.Request, *ec2.GetConsoleScreenshotOutput) + + GetDefaultCreditSpecification(*ec2.GetDefaultCreditSpecificationInput) (*ec2.GetDefaultCreditSpecificationOutput, error) + GetDefaultCreditSpecificationWithContext(aws.Context, *ec2.GetDefaultCreditSpecificationInput, ...request.Option) (*ec2.GetDefaultCreditSpecificationOutput, error) + GetDefaultCreditSpecificationRequest(*ec2.GetDefaultCreditSpecificationInput) (*request.Request, *ec2.GetDefaultCreditSpecificationOutput) + + GetEbsDefaultKmsKeyId(*ec2.GetEbsDefaultKmsKeyIdInput) 
(*ec2.GetEbsDefaultKmsKeyIdOutput, error) + GetEbsDefaultKmsKeyIdWithContext(aws.Context, *ec2.GetEbsDefaultKmsKeyIdInput, ...request.Option) (*ec2.GetEbsDefaultKmsKeyIdOutput, error) + GetEbsDefaultKmsKeyIdRequest(*ec2.GetEbsDefaultKmsKeyIdInput) (*request.Request, *ec2.GetEbsDefaultKmsKeyIdOutput) + + GetEbsEncryptionByDefault(*ec2.GetEbsEncryptionByDefaultInput) (*ec2.GetEbsEncryptionByDefaultOutput, error) + GetEbsEncryptionByDefaultWithContext(aws.Context, *ec2.GetEbsEncryptionByDefaultInput, ...request.Option) (*ec2.GetEbsEncryptionByDefaultOutput, error) + GetEbsEncryptionByDefaultRequest(*ec2.GetEbsEncryptionByDefaultInput) (*request.Request, *ec2.GetEbsEncryptionByDefaultOutput) + + GetFlowLogsIntegrationTemplate(*ec2.GetFlowLogsIntegrationTemplateInput) (*ec2.GetFlowLogsIntegrationTemplateOutput, error) + GetFlowLogsIntegrationTemplateWithContext(aws.Context, *ec2.GetFlowLogsIntegrationTemplateInput, ...request.Option) (*ec2.GetFlowLogsIntegrationTemplateOutput, error) + GetFlowLogsIntegrationTemplateRequest(*ec2.GetFlowLogsIntegrationTemplateInput) (*request.Request, *ec2.GetFlowLogsIntegrationTemplateOutput) + + GetGroupsForCapacityReservation(*ec2.GetGroupsForCapacityReservationInput) (*ec2.GetGroupsForCapacityReservationOutput, error) + GetGroupsForCapacityReservationWithContext(aws.Context, *ec2.GetGroupsForCapacityReservationInput, ...request.Option) (*ec2.GetGroupsForCapacityReservationOutput, error) + GetGroupsForCapacityReservationRequest(*ec2.GetGroupsForCapacityReservationInput) (*request.Request, *ec2.GetGroupsForCapacityReservationOutput) + + GetGroupsForCapacityReservationPages(*ec2.GetGroupsForCapacityReservationInput, func(*ec2.GetGroupsForCapacityReservationOutput, bool) bool) error + GetGroupsForCapacityReservationPagesWithContext(aws.Context, *ec2.GetGroupsForCapacityReservationInput, func(*ec2.GetGroupsForCapacityReservationOutput, bool) bool, ...request.Option) error + + GetHostReservationPurchasePreview(*ec2.GetHostReservationPurchasePreviewInput) (*ec2.GetHostReservationPurchasePreviewOutput, error) + GetHostReservationPurchasePreviewWithContext(aws.Context, *ec2.GetHostReservationPurchasePreviewInput, ...request.Option) (*ec2.GetHostReservationPurchasePreviewOutput, error) + GetHostReservationPurchasePreviewRequest(*ec2.GetHostReservationPurchasePreviewInput) (*request.Request, *ec2.GetHostReservationPurchasePreviewOutput) + + GetImageBlockPublicAccessState(*ec2.GetImageBlockPublicAccessStateInput) (*ec2.GetImageBlockPublicAccessStateOutput, error) + GetImageBlockPublicAccessStateWithContext(aws.Context, *ec2.GetImageBlockPublicAccessStateInput, ...request.Option) (*ec2.GetImageBlockPublicAccessStateOutput, error) + GetImageBlockPublicAccessStateRequest(*ec2.GetImageBlockPublicAccessStateInput) (*request.Request, *ec2.GetImageBlockPublicAccessStateOutput) + + GetInstanceMetadataDefaults(*ec2.GetInstanceMetadataDefaultsInput) (*ec2.GetInstanceMetadataDefaultsOutput, error) + GetInstanceMetadataDefaultsWithContext(aws.Context, *ec2.GetInstanceMetadataDefaultsInput, ...request.Option) (*ec2.GetInstanceMetadataDefaultsOutput, error) + GetInstanceMetadataDefaultsRequest(*ec2.GetInstanceMetadataDefaultsInput) (*request.Request, *ec2.GetInstanceMetadataDefaultsOutput) + + GetInstanceTpmEkPub(*ec2.GetInstanceTpmEkPubInput) (*ec2.GetInstanceTpmEkPubOutput, error) + GetInstanceTpmEkPubWithContext(aws.Context, *ec2.GetInstanceTpmEkPubInput, ...request.Option) (*ec2.GetInstanceTpmEkPubOutput, error) + GetInstanceTpmEkPubRequest(*ec2.GetInstanceTpmEkPubInput) 
(*request.Request, *ec2.GetInstanceTpmEkPubOutput) + + GetInstanceTypesFromInstanceRequirements(*ec2.GetInstanceTypesFromInstanceRequirementsInput) (*ec2.GetInstanceTypesFromInstanceRequirementsOutput, error) + GetInstanceTypesFromInstanceRequirementsWithContext(aws.Context, *ec2.GetInstanceTypesFromInstanceRequirementsInput, ...request.Option) (*ec2.GetInstanceTypesFromInstanceRequirementsOutput, error) + GetInstanceTypesFromInstanceRequirementsRequest(*ec2.GetInstanceTypesFromInstanceRequirementsInput) (*request.Request, *ec2.GetInstanceTypesFromInstanceRequirementsOutput) + + GetInstanceTypesFromInstanceRequirementsPages(*ec2.GetInstanceTypesFromInstanceRequirementsInput, func(*ec2.GetInstanceTypesFromInstanceRequirementsOutput, bool) bool) error + GetInstanceTypesFromInstanceRequirementsPagesWithContext(aws.Context, *ec2.GetInstanceTypesFromInstanceRequirementsInput, func(*ec2.GetInstanceTypesFromInstanceRequirementsOutput, bool) bool, ...request.Option) error + + GetInstanceUefiData(*ec2.GetInstanceUefiDataInput) (*ec2.GetInstanceUefiDataOutput, error) + GetInstanceUefiDataWithContext(aws.Context, *ec2.GetInstanceUefiDataInput, ...request.Option) (*ec2.GetInstanceUefiDataOutput, error) + GetInstanceUefiDataRequest(*ec2.GetInstanceUefiDataInput) (*request.Request, *ec2.GetInstanceUefiDataOutput) + + GetIpamAddressHistory(*ec2.GetIpamAddressHistoryInput) (*ec2.GetIpamAddressHistoryOutput, error) + GetIpamAddressHistoryWithContext(aws.Context, *ec2.GetIpamAddressHistoryInput, ...request.Option) (*ec2.GetIpamAddressHistoryOutput, error) + GetIpamAddressHistoryRequest(*ec2.GetIpamAddressHistoryInput) (*request.Request, *ec2.GetIpamAddressHistoryOutput) + + GetIpamAddressHistoryPages(*ec2.GetIpamAddressHistoryInput, func(*ec2.GetIpamAddressHistoryOutput, bool) bool) error + GetIpamAddressHistoryPagesWithContext(aws.Context, *ec2.GetIpamAddressHistoryInput, func(*ec2.GetIpamAddressHistoryOutput, bool) bool, ...request.Option) error + + GetIpamDiscoveredAccounts(*ec2.GetIpamDiscoveredAccountsInput) (*ec2.GetIpamDiscoveredAccountsOutput, error) + GetIpamDiscoveredAccountsWithContext(aws.Context, *ec2.GetIpamDiscoveredAccountsInput, ...request.Option) (*ec2.GetIpamDiscoveredAccountsOutput, error) + GetIpamDiscoveredAccountsRequest(*ec2.GetIpamDiscoveredAccountsInput) (*request.Request, *ec2.GetIpamDiscoveredAccountsOutput) + + GetIpamDiscoveredAccountsPages(*ec2.GetIpamDiscoveredAccountsInput, func(*ec2.GetIpamDiscoveredAccountsOutput, bool) bool) error + GetIpamDiscoveredAccountsPagesWithContext(aws.Context, *ec2.GetIpamDiscoveredAccountsInput, func(*ec2.GetIpamDiscoveredAccountsOutput, bool) bool, ...request.Option) error + + GetIpamDiscoveredPublicAddresses(*ec2.GetIpamDiscoveredPublicAddressesInput) (*ec2.GetIpamDiscoveredPublicAddressesOutput, error) + GetIpamDiscoveredPublicAddressesWithContext(aws.Context, *ec2.GetIpamDiscoveredPublicAddressesInput, ...request.Option) (*ec2.GetIpamDiscoveredPublicAddressesOutput, error) + GetIpamDiscoveredPublicAddressesRequest(*ec2.GetIpamDiscoveredPublicAddressesInput) (*request.Request, *ec2.GetIpamDiscoveredPublicAddressesOutput) + + GetIpamDiscoveredResourceCidrs(*ec2.GetIpamDiscoveredResourceCidrsInput) (*ec2.GetIpamDiscoveredResourceCidrsOutput, error) + GetIpamDiscoveredResourceCidrsWithContext(aws.Context, *ec2.GetIpamDiscoveredResourceCidrsInput, ...request.Option) (*ec2.GetIpamDiscoveredResourceCidrsOutput, error) + GetIpamDiscoveredResourceCidrsRequest(*ec2.GetIpamDiscoveredResourceCidrsInput) (*request.Request, 
*ec2.GetIpamDiscoveredResourceCidrsOutput) + + GetIpamDiscoveredResourceCidrsPages(*ec2.GetIpamDiscoveredResourceCidrsInput, func(*ec2.GetIpamDiscoveredResourceCidrsOutput, bool) bool) error + GetIpamDiscoveredResourceCidrsPagesWithContext(aws.Context, *ec2.GetIpamDiscoveredResourceCidrsInput, func(*ec2.GetIpamDiscoveredResourceCidrsOutput, bool) bool, ...request.Option) error + + GetIpamPoolAllocations(*ec2.GetIpamPoolAllocationsInput) (*ec2.GetIpamPoolAllocationsOutput, error) + GetIpamPoolAllocationsWithContext(aws.Context, *ec2.GetIpamPoolAllocationsInput, ...request.Option) (*ec2.GetIpamPoolAllocationsOutput, error) + GetIpamPoolAllocationsRequest(*ec2.GetIpamPoolAllocationsInput) (*request.Request, *ec2.GetIpamPoolAllocationsOutput) + + GetIpamPoolAllocationsPages(*ec2.GetIpamPoolAllocationsInput, func(*ec2.GetIpamPoolAllocationsOutput, bool) bool) error + GetIpamPoolAllocationsPagesWithContext(aws.Context, *ec2.GetIpamPoolAllocationsInput, func(*ec2.GetIpamPoolAllocationsOutput, bool) bool, ...request.Option) error + + GetIpamPoolCidrs(*ec2.GetIpamPoolCidrsInput) (*ec2.GetIpamPoolCidrsOutput, error) + GetIpamPoolCidrsWithContext(aws.Context, *ec2.GetIpamPoolCidrsInput, ...request.Option) (*ec2.GetIpamPoolCidrsOutput, error) + GetIpamPoolCidrsRequest(*ec2.GetIpamPoolCidrsInput) (*request.Request, *ec2.GetIpamPoolCidrsOutput) + + GetIpamPoolCidrsPages(*ec2.GetIpamPoolCidrsInput, func(*ec2.GetIpamPoolCidrsOutput, bool) bool) error + GetIpamPoolCidrsPagesWithContext(aws.Context, *ec2.GetIpamPoolCidrsInput, func(*ec2.GetIpamPoolCidrsOutput, bool) bool, ...request.Option) error + + GetIpamResourceCidrs(*ec2.GetIpamResourceCidrsInput) (*ec2.GetIpamResourceCidrsOutput, error) + GetIpamResourceCidrsWithContext(aws.Context, *ec2.GetIpamResourceCidrsInput, ...request.Option) (*ec2.GetIpamResourceCidrsOutput, error) + GetIpamResourceCidrsRequest(*ec2.GetIpamResourceCidrsInput) (*request.Request, *ec2.GetIpamResourceCidrsOutput) + + GetIpamResourceCidrsPages(*ec2.GetIpamResourceCidrsInput, func(*ec2.GetIpamResourceCidrsOutput, bool) bool) error + GetIpamResourceCidrsPagesWithContext(aws.Context, *ec2.GetIpamResourceCidrsInput, func(*ec2.GetIpamResourceCidrsOutput, bool) bool, ...request.Option) error + + GetLaunchTemplateData(*ec2.GetLaunchTemplateDataInput) (*ec2.GetLaunchTemplateDataOutput, error) + GetLaunchTemplateDataWithContext(aws.Context, *ec2.GetLaunchTemplateDataInput, ...request.Option) (*ec2.GetLaunchTemplateDataOutput, error) + GetLaunchTemplateDataRequest(*ec2.GetLaunchTemplateDataInput) (*request.Request, *ec2.GetLaunchTemplateDataOutput) + + GetManagedPrefixListAssociations(*ec2.GetManagedPrefixListAssociationsInput) (*ec2.GetManagedPrefixListAssociationsOutput, error) + GetManagedPrefixListAssociationsWithContext(aws.Context, *ec2.GetManagedPrefixListAssociationsInput, ...request.Option) (*ec2.GetManagedPrefixListAssociationsOutput, error) + GetManagedPrefixListAssociationsRequest(*ec2.GetManagedPrefixListAssociationsInput) (*request.Request, *ec2.GetManagedPrefixListAssociationsOutput) + + GetManagedPrefixListAssociationsPages(*ec2.GetManagedPrefixListAssociationsInput, func(*ec2.GetManagedPrefixListAssociationsOutput, bool) bool) error + GetManagedPrefixListAssociationsPagesWithContext(aws.Context, *ec2.GetManagedPrefixListAssociationsInput, func(*ec2.GetManagedPrefixListAssociationsOutput, bool) bool, ...request.Option) error + + GetManagedPrefixListEntries(*ec2.GetManagedPrefixListEntriesInput) (*ec2.GetManagedPrefixListEntriesOutput, error) + 
GetManagedPrefixListEntriesWithContext(aws.Context, *ec2.GetManagedPrefixListEntriesInput, ...request.Option) (*ec2.GetManagedPrefixListEntriesOutput, error) + GetManagedPrefixListEntriesRequest(*ec2.GetManagedPrefixListEntriesInput) (*request.Request, *ec2.GetManagedPrefixListEntriesOutput) + + GetManagedPrefixListEntriesPages(*ec2.GetManagedPrefixListEntriesInput, func(*ec2.GetManagedPrefixListEntriesOutput, bool) bool) error + GetManagedPrefixListEntriesPagesWithContext(aws.Context, *ec2.GetManagedPrefixListEntriesInput, func(*ec2.GetManagedPrefixListEntriesOutput, bool) bool, ...request.Option) error + + GetNetworkInsightsAccessScopeAnalysisFindings(*ec2.GetNetworkInsightsAccessScopeAnalysisFindingsInput) (*ec2.GetNetworkInsightsAccessScopeAnalysisFindingsOutput, error) + GetNetworkInsightsAccessScopeAnalysisFindingsWithContext(aws.Context, *ec2.GetNetworkInsightsAccessScopeAnalysisFindingsInput, ...request.Option) (*ec2.GetNetworkInsightsAccessScopeAnalysisFindingsOutput, error) + GetNetworkInsightsAccessScopeAnalysisFindingsRequest(*ec2.GetNetworkInsightsAccessScopeAnalysisFindingsInput) (*request.Request, *ec2.GetNetworkInsightsAccessScopeAnalysisFindingsOutput) + + GetNetworkInsightsAccessScopeAnalysisFindingsPages(*ec2.GetNetworkInsightsAccessScopeAnalysisFindingsInput, func(*ec2.GetNetworkInsightsAccessScopeAnalysisFindingsOutput, bool) bool) error + GetNetworkInsightsAccessScopeAnalysisFindingsPagesWithContext(aws.Context, *ec2.GetNetworkInsightsAccessScopeAnalysisFindingsInput, func(*ec2.GetNetworkInsightsAccessScopeAnalysisFindingsOutput, bool) bool, ...request.Option) error + + GetNetworkInsightsAccessScopeContent(*ec2.GetNetworkInsightsAccessScopeContentInput) (*ec2.GetNetworkInsightsAccessScopeContentOutput, error) + GetNetworkInsightsAccessScopeContentWithContext(aws.Context, *ec2.GetNetworkInsightsAccessScopeContentInput, ...request.Option) (*ec2.GetNetworkInsightsAccessScopeContentOutput, error) + GetNetworkInsightsAccessScopeContentRequest(*ec2.GetNetworkInsightsAccessScopeContentInput) (*request.Request, *ec2.GetNetworkInsightsAccessScopeContentOutput) + + GetPasswordData(*ec2.GetPasswordDataInput) (*ec2.GetPasswordDataOutput, error) + GetPasswordDataWithContext(aws.Context, *ec2.GetPasswordDataInput, ...request.Option) (*ec2.GetPasswordDataOutput, error) + GetPasswordDataRequest(*ec2.GetPasswordDataInput) (*request.Request, *ec2.GetPasswordDataOutput) + + GetReservedInstancesExchangeQuote(*ec2.GetReservedInstancesExchangeQuoteInput) (*ec2.GetReservedInstancesExchangeQuoteOutput, error) + GetReservedInstancesExchangeQuoteWithContext(aws.Context, *ec2.GetReservedInstancesExchangeQuoteInput, ...request.Option) (*ec2.GetReservedInstancesExchangeQuoteOutput, error) + GetReservedInstancesExchangeQuoteRequest(*ec2.GetReservedInstancesExchangeQuoteInput) (*request.Request, *ec2.GetReservedInstancesExchangeQuoteOutput) + + GetSecurityGroupsForVpc(*ec2.GetSecurityGroupsForVpcInput) (*ec2.GetSecurityGroupsForVpcOutput, error) + GetSecurityGroupsForVpcWithContext(aws.Context, *ec2.GetSecurityGroupsForVpcInput, ...request.Option) (*ec2.GetSecurityGroupsForVpcOutput, error) + GetSecurityGroupsForVpcRequest(*ec2.GetSecurityGroupsForVpcInput) (*request.Request, *ec2.GetSecurityGroupsForVpcOutput) + + GetSecurityGroupsForVpcPages(*ec2.GetSecurityGroupsForVpcInput, func(*ec2.GetSecurityGroupsForVpcOutput, bool) bool) error + GetSecurityGroupsForVpcPagesWithContext(aws.Context, *ec2.GetSecurityGroupsForVpcInput, func(*ec2.GetSecurityGroupsForVpcOutput, bool) bool, ...request.Option) 
error + + GetSerialConsoleAccessStatus(*ec2.GetSerialConsoleAccessStatusInput) (*ec2.GetSerialConsoleAccessStatusOutput, error) + GetSerialConsoleAccessStatusWithContext(aws.Context, *ec2.GetSerialConsoleAccessStatusInput, ...request.Option) (*ec2.GetSerialConsoleAccessStatusOutput, error) + GetSerialConsoleAccessStatusRequest(*ec2.GetSerialConsoleAccessStatusInput) (*request.Request, *ec2.GetSerialConsoleAccessStatusOutput) + + GetSnapshotBlockPublicAccessState(*ec2.GetSnapshotBlockPublicAccessStateInput) (*ec2.GetSnapshotBlockPublicAccessStateOutput, error) + GetSnapshotBlockPublicAccessStateWithContext(aws.Context, *ec2.GetSnapshotBlockPublicAccessStateInput, ...request.Option) (*ec2.GetSnapshotBlockPublicAccessStateOutput, error) + GetSnapshotBlockPublicAccessStateRequest(*ec2.GetSnapshotBlockPublicAccessStateInput) (*request.Request, *ec2.GetSnapshotBlockPublicAccessStateOutput) + + GetSpotPlacementScores(*ec2.GetSpotPlacementScoresInput) (*ec2.GetSpotPlacementScoresOutput, error) + GetSpotPlacementScoresWithContext(aws.Context, *ec2.GetSpotPlacementScoresInput, ...request.Option) (*ec2.GetSpotPlacementScoresOutput, error) + GetSpotPlacementScoresRequest(*ec2.GetSpotPlacementScoresInput) (*request.Request, *ec2.GetSpotPlacementScoresOutput) + + GetSpotPlacementScoresPages(*ec2.GetSpotPlacementScoresInput, func(*ec2.GetSpotPlacementScoresOutput, bool) bool) error + GetSpotPlacementScoresPagesWithContext(aws.Context, *ec2.GetSpotPlacementScoresInput, func(*ec2.GetSpotPlacementScoresOutput, bool) bool, ...request.Option) error + + GetSubnetCidrReservations(*ec2.GetSubnetCidrReservationsInput) (*ec2.GetSubnetCidrReservationsOutput, error) + GetSubnetCidrReservationsWithContext(aws.Context, *ec2.GetSubnetCidrReservationsInput, ...request.Option) (*ec2.GetSubnetCidrReservationsOutput, error) + GetSubnetCidrReservationsRequest(*ec2.GetSubnetCidrReservationsInput) (*request.Request, *ec2.GetSubnetCidrReservationsOutput) + + GetTransitGatewayAttachmentPropagations(*ec2.GetTransitGatewayAttachmentPropagationsInput) (*ec2.GetTransitGatewayAttachmentPropagationsOutput, error) + GetTransitGatewayAttachmentPropagationsWithContext(aws.Context, *ec2.GetTransitGatewayAttachmentPropagationsInput, ...request.Option) (*ec2.GetTransitGatewayAttachmentPropagationsOutput, error) + GetTransitGatewayAttachmentPropagationsRequest(*ec2.GetTransitGatewayAttachmentPropagationsInput) (*request.Request, *ec2.GetTransitGatewayAttachmentPropagationsOutput) + + GetTransitGatewayAttachmentPropagationsPages(*ec2.GetTransitGatewayAttachmentPropagationsInput, func(*ec2.GetTransitGatewayAttachmentPropagationsOutput, bool) bool) error + GetTransitGatewayAttachmentPropagationsPagesWithContext(aws.Context, *ec2.GetTransitGatewayAttachmentPropagationsInput, func(*ec2.GetTransitGatewayAttachmentPropagationsOutput, bool) bool, ...request.Option) error + + GetTransitGatewayMulticastDomainAssociations(*ec2.GetTransitGatewayMulticastDomainAssociationsInput) (*ec2.GetTransitGatewayMulticastDomainAssociationsOutput, error) + GetTransitGatewayMulticastDomainAssociationsWithContext(aws.Context, *ec2.GetTransitGatewayMulticastDomainAssociationsInput, ...request.Option) (*ec2.GetTransitGatewayMulticastDomainAssociationsOutput, error) + GetTransitGatewayMulticastDomainAssociationsRequest(*ec2.GetTransitGatewayMulticastDomainAssociationsInput) (*request.Request, *ec2.GetTransitGatewayMulticastDomainAssociationsOutput) + + GetTransitGatewayMulticastDomainAssociationsPages(*ec2.GetTransitGatewayMulticastDomainAssociationsInput, 
func(*ec2.GetTransitGatewayMulticastDomainAssociationsOutput, bool) bool) error + GetTransitGatewayMulticastDomainAssociationsPagesWithContext(aws.Context, *ec2.GetTransitGatewayMulticastDomainAssociationsInput, func(*ec2.GetTransitGatewayMulticastDomainAssociationsOutput, bool) bool, ...request.Option) error + + GetTransitGatewayPolicyTableAssociations(*ec2.GetTransitGatewayPolicyTableAssociationsInput) (*ec2.GetTransitGatewayPolicyTableAssociationsOutput, error) + GetTransitGatewayPolicyTableAssociationsWithContext(aws.Context, *ec2.GetTransitGatewayPolicyTableAssociationsInput, ...request.Option) (*ec2.GetTransitGatewayPolicyTableAssociationsOutput, error) + GetTransitGatewayPolicyTableAssociationsRequest(*ec2.GetTransitGatewayPolicyTableAssociationsInput) (*request.Request, *ec2.GetTransitGatewayPolicyTableAssociationsOutput) + + GetTransitGatewayPolicyTableAssociationsPages(*ec2.GetTransitGatewayPolicyTableAssociationsInput, func(*ec2.GetTransitGatewayPolicyTableAssociationsOutput, bool) bool) error + GetTransitGatewayPolicyTableAssociationsPagesWithContext(aws.Context, *ec2.GetTransitGatewayPolicyTableAssociationsInput, func(*ec2.GetTransitGatewayPolicyTableAssociationsOutput, bool) bool, ...request.Option) error + + GetTransitGatewayPolicyTableEntries(*ec2.GetTransitGatewayPolicyTableEntriesInput) (*ec2.GetTransitGatewayPolicyTableEntriesOutput, error) + GetTransitGatewayPolicyTableEntriesWithContext(aws.Context, *ec2.GetTransitGatewayPolicyTableEntriesInput, ...request.Option) (*ec2.GetTransitGatewayPolicyTableEntriesOutput, error) + GetTransitGatewayPolicyTableEntriesRequest(*ec2.GetTransitGatewayPolicyTableEntriesInput) (*request.Request, *ec2.GetTransitGatewayPolicyTableEntriesOutput) + + GetTransitGatewayPrefixListReferences(*ec2.GetTransitGatewayPrefixListReferencesInput) (*ec2.GetTransitGatewayPrefixListReferencesOutput, error) + GetTransitGatewayPrefixListReferencesWithContext(aws.Context, *ec2.GetTransitGatewayPrefixListReferencesInput, ...request.Option) (*ec2.GetTransitGatewayPrefixListReferencesOutput, error) + GetTransitGatewayPrefixListReferencesRequest(*ec2.GetTransitGatewayPrefixListReferencesInput) (*request.Request, *ec2.GetTransitGatewayPrefixListReferencesOutput) + + GetTransitGatewayPrefixListReferencesPages(*ec2.GetTransitGatewayPrefixListReferencesInput, func(*ec2.GetTransitGatewayPrefixListReferencesOutput, bool) bool) error + GetTransitGatewayPrefixListReferencesPagesWithContext(aws.Context, *ec2.GetTransitGatewayPrefixListReferencesInput, func(*ec2.GetTransitGatewayPrefixListReferencesOutput, bool) bool, ...request.Option) error + + GetTransitGatewayRouteTableAssociations(*ec2.GetTransitGatewayRouteTableAssociationsInput) (*ec2.GetTransitGatewayRouteTableAssociationsOutput, error) + GetTransitGatewayRouteTableAssociationsWithContext(aws.Context, *ec2.GetTransitGatewayRouteTableAssociationsInput, ...request.Option) (*ec2.GetTransitGatewayRouteTableAssociationsOutput, error) + GetTransitGatewayRouteTableAssociationsRequest(*ec2.GetTransitGatewayRouteTableAssociationsInput) (*request.Request, *ec2.GetTransitGatewayRouteTableAssociationsOutput) + + GetTransitGatewayRouteTableAssociationsPages(*ec2.GetTransitGatewayRouteTableAssociationsInput, func(*ec2.GetTransitGatewayRouteTableAssociationsOutput, bool) bool) error + GetTransitGatewayRouteTableAssociationsPagesWithContext(aws.Context, *ec2.GetTransitGatewayRouteTableAssociationsInput, func(*ec2.GetTransitGatewayRouteTableAssociationsOutput, bool) bool, ...request.Option) error + + 
GetTransitGatewayRouteTablePropagations(*ec2.GetTransitGatewayRouteTablePropagationsInput) (*ec2.GetTransitGatewayRouteTablePropagationsOutput, error)
+	GetTransitGatewayRouteTablePropagationsWithContext(aws.Context, *ec2.GetTransitGatewayRouteTablePropagationsInput, ...request.Option) (*ec2.GetTransitGatewayRouteTablePropagationsOutput, error)
+	GetTransitGatewayRouteTablePropagationsRequest(*ec2.GetTransitGatewayRouteTablePropagationsInput) (*request.Request, *ec2.GetTransitGatewayRouteTablePropagationsOutput)
+
+	GetTransitGatewayRouteTablePropagationsPages(*ec2.GetTransitGatewayRouteTablePropagationsInput, func(*ec2.GetTransitGatewayRouteTablePropagationsOutput, bool) bool) error
+	GetTransitGatewayRouteTablePropagationsPagesWithContext(aws.Context, *ec2.GetTransitGatewayRouteTablePropagationsInput, func(*ec2.GetTransitGatewayRouteTablePropagationsOutput, bool) bool, ...request.Option) error
+
+	GetVerifiedAccessEndpointPolicy(*ec2.GetVerifiedAccessEndpointPolicyInput) (*ec2.GetVerifiedAccessEndpointPolicyOutput, error)
+	GetVerifiedAccessEndpointPolicyWithContext(aws.Context, *ec2.GetVerifiedAccessEndpointPolicyInput, ...request.Option) (*ec2.GetVerifiedAccessEndpointPolicyOutput, error)
+	GetVerifiedAccessEndpointPolicyRequest(*ec2.GetVerifiedAccessEndpointPolicyInput) (*request.Request, *ec2.GetVerifiedAccessEndpointPolicyOutput)
+
+	GetVerifiedAccessGroupPolicy(*ec2.GetVerifiedAccessGroupPolicyInput) (*ec2.GetVerifiedAccessGroupPolicyOutput, error)
+	GetVerifiedAccessGroupPolicyWithContext(aws.Context, *ec2.GetVerifiedAccessGroupPolicyInput, ...request.Option) (*ec2.GetVerifiedAccessGroupPolicyOutput, error)
+	GetVerifiedAccessGroupPolicyRequest(*ec2.GetVerifiedAccessGroupPolicyInput) (*request.Request, *ec2.GetVerifiedAccessGroupPolicyOutput)
+
+	GetVpnConnectionDeviceSampleConfiguration(*ec2.GetVpnConnectionDeviceSampleConfigurationInput) (*ec2.GetVpnConnectionDeviceSampleConfigurationOutput, error)
+	GetVpnConnectionDeviceSampleConfigurationWithContext(aws.Context, *ec2.GetVpnConnectionDeviceSampleConfigurationInput, ...request.Option) (*ec2.GetVpnConnectionDeviceSampleConfigurationOutput, error)
+	GetVpnConnectionDeviceSampleConfigurationRequest(*ec2.GetVpnConnectionDeviceSampleConfigurationInput) (*request.Request, *ec2.GetVpnConnectionDeviceSampleConfigurationOutput)
+
+	GetVpnConnectionDeviceTypes(*ec2.GetVpnConnectionDeviceTypesInput) (*ec2.GetVpnConnectionDeviceTypesOutput, error)
+	GetVpnConnectionDeviceTypesWithContext(aws.Context, *ec2.GetVpnConnectionDeviceTypesInput, ...request.Option) (*ec2.GetVpnConnectionDeviceTypesOutput, error)
+	GetVpnConnectionDeviceTypesRequest(*ec2.GetVpnConnectionDeviceTypesInput) (*request.Request, *ec2.GetVpnConnectionDeviceTypesOutput)
+
+	GetVpnConnectionDeviceTypesPages(*ec2.GetVpnConnectionDeviceTypesInput, func(*ec2.GetVpnConnectionDeviceTypesOutput, bool) bool) error
+	GetVpnConnectionDeviceTypesPagesWithContext(aws.Context, *ec2.GetVpnConnectionDeviceTypesInput, func(*ec2.GetVpnConnectionDeviceTypesOutput, bool) bool, ...request.Option) error
+
+	GetVpnTunnelReplacementStatus(*ec2.GetVpnTunnelReplacementStatusInput) (*ec2.GetVpnTunnelReplacementStatusOutput, error)
+	GetVpnTunnelReplacementStatusWithContext(aws.Context, *ec2.GetVpnTunnelReplacementStatusInput, ...request.Option) (*ec2.GetVpnTunnelReplacementStatusOutput, error)
+	GetVpnTunnelReplacementStatusRequest(*ec2.GetVpnTunnelReplacementStatusInput) (*request.Request, *ec2.GetVpnTunnelReplacementStatusOutput)
+
ImportClientVpnClientCertificateRevocationList(*ec2.ImportClientVpnClientCertificateRevocationListInput) (*ec2.ImportClientVpnClientCertificateRevocationListOutput, error) + ImportClientVpnClientCertificateRevocationListWithContext(aws.Context, *ec2.ImportClientVpnClientCertificateRevocationListInput, ...request.Option) (*ec2.ImportClientVpnClientCertificateRevocationListOutput, error) + ImportClientVpnClientCertificateRevocationListRequest(*ec2.ImportClientVpnClientCertificateRevocationListInput) (*request.Request, *ec2.ImportClientVpnClientCertificateRevocationListOutput) + + ImportImage(*ec2.ImportImageInput) (*ec2.ImportImageOutput, error) + ImportImageWithContext(aws.Context, *ec2.ImportImageInput, ...request.Option) (*ec2.ImportImageOutput, error) + ImportImageRequest(*ec2.ImportImageInput) (*request.Request, *ec2.ImportImageOutput) + + ImportInstance(*ec2.ImportInstanceInput) (*ec2.ImportInstanceOutput, error) + ImportInstanceWithContext(aws.Context, *ec2.ImportInstanceInput, ...request.Option) (*ec2.ImportInstanceOutput, error) + ImportInstanceRequest(*ec2.ImportInstanceInput) (*request.Request, *ec2.ImportInstanceOutput) + + ImportKeyPair(*ec2.ImportKeyPairInput) (*ec2.ImportKeyPairOutput, error) + ImportKeyPairWithContext(aws.Context, *ec2.ImportKeyPairInput, ...request.Option) (*ec2.ImportKeyPairOutput, error) + ImportKeyPairRequest(*ec2.ImportKeyPairInput) (*request.Request, *ec2.ImportKeyPairOutput) + + ImportSnapshot(*ec2.ImportSnapshotInput) (*ec2.ImportSnapshotOutput, error) + ImportSnapshotWithContext(aws.Context, *ec2.ImportSnapshotInput, ...request.Option) (*ec2.ImportSnapshotOutput, error) + ImportSnapshotRequest(*ec2.ImportSnapshotInput) (*request.Request, *ec2.ImportSnapshotOutput) + + ImportVolume(*ec2.ImportVolumeInput) (*ec2.ImportVolumeOutput, error) + ImportVolumeWithContext(aws.Context, *ec2.ImportVolumeInput, ...request.Option) (*ec2.ImportVolumeOutput, error) + ImportVolumeRequest(*ec2.ImportVolumeInput) (*request.Request, *ec2.ImportVolumeOutput) + + ListImagesInRecycleBin(*ec2.ListImagesInRecycleBinInput) (*ec2.ListImagesInRecycleBinOutput, error) + ListImagesInRecycleBinWithContext(aws.Context, *ec2.ListImagesInRecycleBinInput, ...request.Option) (*ec2.ListImagesInRecycleBinOutput, error) + ListImagesInRecycleBinRequest(*ec2.ListImagesInRecycleBinInput) (*request.Request, *ec2.ListImagesInRecycleBinOutput) + + ListImagesInRecycleBinPages(*ec2.ListImagesInRecycleBinInput, func(*ec2.ListImagesInRecycleBinOutput, bool) bool) error + ListImagesInRecycleBinPagesWithContext(aws.Context, *ec2.ListImagesInRecycleBinInput, func(*ec2.ListImagesInRecycleBinOutput, bool) bool, ...request.Option) error + + ListSnapshotsInRecycleBin(*ec2.ListSnapshotsInRecycleBinInput) (*ec2.ListSnapshotsInRecycleBinOutput, error) + ListSnapshotsInRecycleBinWithContext(aws.Context, *ec2.ListSnapshotsInRecycleBinInput, ...request.Option) (*ec2.ListSnapshotsInRecycleBinOutput, error) + ListSnapshotsInRecycleBinRequest(*ec2.ListSnapshotsInRecycleBinInput) (*request.Request, *ec2.ListSnapshotsInRecycleBinOutput) + + ListSnapshotsInRecycleBinPages(*ec2.ListSnapshotsInRecycleBinInput, func(*ec2.ListSnapshotsInRecycleBinOutput, bool) bool) error + ListSnapshotsInRecycleBinPagesWithContext(aws.Context, *ec2.ListSnapshotsInRecycleBinInput, func(*ec2.ListSnapshotsInRecycleBinOutput, bool) bool, ...request.Option) error + + LockSnapshot(*ec2.LockSnapshotInput) (*ec2.LockSnapshotOutput, error) + LockSnapshotWithContext(aws.Context, *ec2.LockSnapshotInput, ...request.Option) (*ec2.LockSnapshotOutput, 
error) + LockSnapshotRequest(*ec2.LockSnapshotInput) (*request.Request, *ec2.LockSnapshotOutput) + + ModifyAddressAttribute(*ec2.ModifyAddressAttributeInput) (*ec2.ModifyAddressAttributeOutput, error) + ModifyAddressAttributeWithContext(aws.Context, *ec2.ModifyAddressAttributeInput, ...request.Option) (*ec2.ModifyAddressAttributeOutput, error) + ModifyAddressAttributeRequest(*ec2.ModifyAddressAttributeInput) (*request.Request, *ec2.ModifyAddressAttributeOutput) + + ModifyAvailabilityZoneGroup(*ec2.ModifyAvailabilityZoneGroupInput) (*ec2.ModifyAvailabilityZoneGroupOutput, error) + ModifyAvailabilityZoneGroupWithContext(aws.Context, *ec2.ModifyAvailabilityZoneGroupInput, ...request.Option) (*ec2.ModifyAvailabilityZoneGroupOutput, error) + ModifyAvailabilityZoneGroupRequest(*ec2.ModifyAvailabilityZoneGroupInput) (*request.Request, *ec2.ModifyAvailabilityZoneGroupOutput) + + ModifyCapacityReservation(*ec2.ModifyCapacityReservationInput) (*ec2.ModifyCapacityReservationOutput, error) + ModifyCapacityReservationWithContext(aws.Context, *ec2.ModifyCapacityReservationInput, ...request.Option) (*ec2.ModifyCapacityReservationOutput, error) + ModifyCapacityReservationRequest(*ec2.ModifyCapacityReservationInput) (*request.Request, *ec2.ModifyCapacityReservationOutput) + + ModifyCapacityReservationFleet(*ec2.ModifyCapacityReservationFleetInput) (*ec2.ModifyCapacityReservationFleetOutput, error) + ModifyCapacityReservationFleetWithContext(aws.Context, *ec2.ModifyCapacityReservationFleetInput, ...request.Option) (*ec2.ModifyCapacityReservationFleetOutput, error) + ModifyCapacityReservationFleetRequest(*ec2.ModifyCapacityReservationFleetInput) (*request.Request, *ec2.ModifyCapacityReservationFleetOutput) + + ModifyClientVpnEndpoint(*ec2.ModifyClientVpnEndpointInput) (*ec2.ModifyClientVpnEndpointOutput, error) + ModifyClientVpnEndpointWithContext(aws.Context, *ec2.ModifyClientVpnEndpointInput, ...request.Option) (*ec2.ModifyClientVpnEndpointOutput, error) + ModifyClientVpnEndpointRequest(*ec2.ModifyClientVpnEndpointInput) (*request.Request, *ec2.ModifyClientVpnEndpointOutput) + + ModifyDefaultCreditSpecification(*ec2.ModifyDefaultCreditSpecificationInput) (*ec2.ModifyDefaultCreditSpecificationOutput, error) + ModifyDefaultCreditSpecificationWithContext(aws.Context, *ec2.ModifyDefaultCreditSpecificationInput, ...request.Option) (*ec2.ModifyDefaultCreditSpecificationOutput, error) + ModifyDefaultCreditSpecificationRequest(*ec2.ModifyDefaultCreditSpecificationInput) (*request.Request, *ec2.ModifyDefaultCreditSpecificationOutput) + + ModifyEbsDefaultKmsKeyId(*ec2.ModifyEbsDefaultKmsKeyIdInput) (*ec2.ModifyEbsDefaultKmsKeyIdOutput, error) + ModifyEbsDefaultKmsKeyIdWithContext(aws.Context, *ec2.ModifyEbsDefaultKmsKeyIdInput, ...request.Option) (*ec2.ModifyEbsDefaultKmsKeyIdOutput, error) + ModifyEbsDefaultKmsKeyIdRequest(*ec2.ModifyEbsDefaultKmsKeyIdInput) (*request.Request, *ec2.ModifyEbsDefaultKmsKeyIdOutput) + + ModifyFleet(*ec2.ModifyFleetInput) (*ec2.ModifyFleetOutput, error) + ModifyFleetWithContext(aws.Context, *ec2.ModifyFleetInput, ...request.Option) (*ec2.ModifyFleetOutput, error) + ModifyFleetRequest(*ec2.ModifyFleetInput) (*request.Request, *ec2.ModifyFleetOutput) + + ModifyFpgaImageAttribute(*ec2.ModifyFpgaImageAttributeInput) (*ec2.ModifyFpgaImageAttributeOutput, error) + ModifyFpgaImageAttributeWithContext(aws.Context, *ec2.ModifyFpgaImageAttributeInput, ...request.Option) (*ec2.ModifyFpgaImageAttributeOutput, error) + ModifyFpgaImageAttributeRequest(*ec2.ModifyFpgaImageAttributeInput) 
(*request.Request, *ec2.ModifyFpgaImageAttributeOutput) + + ModifyHosts(*ec2.ModifyHostsInput) (*ec2.ModifyHostsOutput, error) + ModifyHostsWithContext(aws.Context, *ec2.ModifyHostsInput, ...request.Option) (*ec2.ModifyHostsOutput, error) + ModifyHostsRequest(*ec2.ModifyHostsInput) (*request.Request, *ec2.ModifyHostsOutput) + + ModifyIdFormat(*ec2.ModifyIdFormatInput) (*ec2.ModifyIdFormatOutput, error) + ModifyIdFormatWithContext(aws.Context, *ec2.ModifyIdFormatInput, ...request.Option) (*ec2.ModifyIdFormatOutput, error) + ModifyIdFormatRequest(*ec2.ModifyIdFormatInput) (*request.Request, *ec2.ModifyIdFormatOutput) + + ModifyIdentityIdFormat(*ec2.ModifyIdentityIdFormatInput) (*ec2.ModifyIdentityIdFormatOutput, error) + ModifyIdentityIdFormatWithContext(aws.Context, *ec2.ModifyIdentityIdFormatInput, ...request.Option) (*ec2.ModifyIdentityIdFormatOutput, error) + ModifyIdentityIdFormatRequest(*ec2.ModifyIdentityIdFormatInput) (*request.Request, *ec2.ModifyIdentityIdFormatOutput) + + ModifyImageAttribute(*ec2.ModifyImageAttributeInput) (*ec2.ModifyImageAttributeOutput, error) + ModifyImageAttributeWithContext(aws.Context, *ec2.ModifyImageAttributeInput, ...request.Option) (*ec2.ModifyImageAttributeOutput, error) + ModifyImageAttributeRequest(*ec2.ModifyImageAttributeInput) (*request.Request, *ec2.ModifyImageAttributeOutput) + + ModifyInstanceAttribute(*ec2.ModifyInstanceAttributeInput) (*ec2.ModifyInstanceAttributeOutput, error) + ModifyInstanceAttributeWithContext(aws.Context, *ec2.ModifyInstanceAttributeInput, ...request.Option) (*ec2.ModifyInstanceAttributeOutput, error) + ModifyInstanceAttributeRequest(*ec2.ModifyInstanceAttributeInput) (*request.Request, *ec2.ModifyInstanceAttributeOutput) + + ModifyInstanceCapacityReservationAttributes(*ec2.ModifyInstanceCapacityReservationAttributesInput) (*ec2.ModifyInstanceCapacityReservationAttributesOutput, error) + ModifyInstanceCapacityReservationAttributesWithContext(aws.Context, *ec2.ModifyInstanceCapacityReservationAttributesInput, ...request.Option) (*ec2.ModifyInstanceCapacityReservationAttributesOutput, error) + ModifyInstanceCapacityReservationAttributesRequest(*ec2.ModifyInstanceCapacityReservationAttributesInput) (*request.Request, *ec2.ModifyInstanceCapacityReservationAttributesOutput) + + ModifyInstanceCreditSpecification(*ec2.ModifyInstanceCreditSpecificationInput) (*ec2.ModifyInstanceCreditSpecificationOutput, error) + ModifyInstanceCreditSpecificationWithContext(aws.Context, *ec2.ModifyInstanceCreditSpecificationInput, ...request.Option) (*ec2.ModifyInstanceCreditSpecificationOutput, error) + ModifyInstanceCreditSpecificationRequest(*ec2.ModifyInstanceCreditSpecificationInput) (*request.Request, *ec2.ModifyInstanceCreditSpecificationOutput) + + ModifyInstanceEventStartTime(*ec2.ModifyInstanceEventStartTimeInput) (*ec2.ModifyInstanceEventStartTimeOutput, error) + ModifyInstanceEventStartTimeWithContext(aws.Context, *ec2.ModifyInstanceEventStartTimeInput, ...request.Option) (*ec2.ModifyInstanceEventStartTimeOutput, error) + ModifyInstanceEventStartTimeRequest(*ec2.ModifyInstanceEventStartTimeInput) (*request.Request, *ec2.ModifyInstanceEventStartTimeOutput) + + ModifyInstanceEventWindow(*ec2.ModifyInstanceEventWindowInput) (*ec2.ModifyInstanceEventWindowOutput, error) + ModifyInstanceEventWindowWithContext(aws.Context, *ec2.ModifyInstanceEventWindowInput, ...request.Option) (*ec2.ModifyInstanceEventWindowOutput, error) + ModifyInstanceEventWindowRequest(*ec2.ModifyInstanceEventWindowInput) (*request.Request, 
*ec2.ModifyInstanceEventWindowOutput) + + ModifyInstanceMaintenanceOptions(*ec2.ModifyInstanceMaintenanceOptionsInput) (*ec2.ModifyInstanceMaintenanceOptionsOutput, error) + ModifyInstanceMaintenanceOptionsWithContext(aws.Context, *ec2.ModifyInstanceMaintenanceOptionsInput, ...request.Option) (*ec2.ModifyInstanceMaintenanceOptionsOutput, error) + ModifyInstanceMaintenanceOptionsRequest(*ec2.ModifyInstanceMaintenanceOptionsInput) (*request.Request, *ec2.ModifyInstanceMaintenanceOptionsOutput) + + ModifyInstanceMetadataDefaults(*ec2.ModifyInstanceMetadataDefaultsInput) (*ec2.ModifyInstanceMetadataDefaultsOutput, error) + ModifyInstanceMetadataDefaultsWithContext(aws.Context, *ec2.ModifyInstanceMetadataDefaultsInput, ...request.Option) (*ec2.ModifyInstanceMetadataDefaultsOutput, error) + ModifyInstanceMetadataDefaultsRequest(*ec2.ModifyInstanceMetadataDefaultsInput) (*request.Request, *ec2.ModifyInstanceMetadataDefaultsOutput) + + ModifyInstanceMetadataOptions(*ec2.ModifyInstanceMetadataOptionsInput) (*ec2.ModifyInstanceMetadataOptionsOutput, error) + ModifyInstanceMetadataOptionsWithContext(aws.Context, *ec2.ModifyInstanceMetadataOptionsInput, ...request.Option) (*ec2.ModifyInstanceMetadataOptionsOutput, error) + ModifyInstanceMetadataOptionsRequest(*ec2.ModifyInstanceMetadataOptionsInput) (*request.Request, *ec2.ModifyInstanceMetadataOptionsOutput) + + ModifyInstancePlacement(*ec2.ModifyInstancePlacementInput) (*ec2.ModifyInstancePlacementOutput, error) + ModifyInstancePlacementWithContext(aws.Context, *ec2.ModifyInstancePlacementInput, ...request.Option) (*ec2.ModifyInstancePlacementOutput, error) + ModifyInstancePlacementRequest(*ec2.ModifyInstancePlacementInput) (*request.Request, *ec2.ModifyInstancePlacementOutput) + + ModifyIpam(*ec2.ModifyIpamInput) (*ec2.ModifyIpamOutput, error) + ModifyIpamWithContext(aws.Context, *ec2.ModifyIpamInput, ...request.Option) (*ec2.ModifyIpamOutput, error) + ModifyIpamRequest(*ec2.ModifyIpamInput) (*request.Request, *ec2.ModifyIpamOutput) + + ModifyIpamPool(*ec2.ModifyIpamPoolInput) (*ec2.ModifyIpamPoolOutput, error) + ModifyIpamPoolWithContext(aws.Context, *ec2.ModifyIpamPoolInput, ...request.Option) (*ec2.ModifyIpamPoolOutput, error) + ModifyIpamPoolRequest(*ec2.ModifyIpamPoolInput) (*request.Request, *ec2.ModifyIpamPoolOutput) + + ModifyIpamResourceCidr(*ec2.ModifyIpamResourceCidrInput) (*ec2.ModifyIpamResourceCidrOutput, error) + ModifyIpamResourceCidrWithContext(aws.Context, *ec2.ModifyIpamResourceCidrInput, ...request.Option) (*ec2.ModifyIpamResourceCidrOutput, error) + ModifyIpamResourceCidrRequest(*ec2.ModifyIpamResourceCidrInput) (*request.Request, *ec2.ModifyIpamResourceCidrOutput) + + ModifyIpamResourceDiscovery(*ec2.ModifyIpamResourceDiscoveryInput) (*ec2.ModifyIpamResourceDiscoveryOutput, error) + ModifyIpamResourceDiscoveryWithContext(aws.Context, *ec2.ModifyIpamResourceDiscoveryInput, ...request.Option) (*ec2.ModifyIpamResourceDiscoveryOutput, error) + ModifyIpamResourceDiscoveryRequest(*ec2.ModifyIpamResourceDiscoveryInput) (*request.Request, *ec2.ModifyIpamResourceDiscoveryOutput) + + ModifyIpamScope(*ec2.ModifyIpamScopeInput) (*ec2.ModifyIpamScopeOutput, error) + ModifyIpamScopeWithContext(aws.Context, *ec2.ModifyIpamScopeInput, ...request.Option) (*ec2.ModifyIpamScopeOutput, error) + ModifyIpamScopeRequest(*ec2.ModifyIpamScopeInput) (*request.Request, *ec2.ModifyIpamScopeOutput) + + ModifyLaunchTemplate(*ec2.ModifyLaunchTemplateInput) (*ec2.ModifyLaunchTemplateOutput, error) + ModifyLaunchTemplateWithContext(aws.Context, 
*ec2.ModifyLaunchTemplateInput, ...request.Option) (*ec2.ModifyLaunchTemplateOutput, error) + ModifyLaunchTemplateRequest(*ec2.ModifyLaunchTemplateInput) (*request.Request, *ec2.ModifyLaunchTemplateOutput) + + ModifyLocalGatewayRoute(*ec2.ModifyLocalGatewayRouteInput) (*ec2.ModifyLocalGatewayRouteOutput, error) + ModifyLocalGatewayRouteWithContext(aws.Context, *ec2.ModifyLocalGatewayRouteInput, ...request.Option) (*ec2.ModifyLocalGatewayRouteOutput, error) + ModifyLocalGatewayRouteRequest(*ec2.ModifyLocalGatewayRouteInput) (*request.Request, *ec2.ModifyLocalGatewayRouteOutput) + + ModifyManagedPrefixList(*ec2.ModifyManagedPrefixListInput) (*ec2.ModifyManagedPrefixListOutput, error) + ModifyManagedPrefixListWithContext(aws.Context, *ec2.ModifyManagedPrefixListInput, ...request.Option) (*ec2.ModifyManagedPrefixListOutput, error) + ModifyManagedPrefixListRequest(*ec2.ModifyManagedPrefixListInput) (*request.Request, *ec2.ModifyManagedPrefixListOutput) + + ModifyNetworkInterfaceAttribute(*ec2.ModifyNetworkInterfaceAttributeInput) (*ec2.ModifyNetworkInterfaceAttributeOutput, error) + ModifyNetworkInterfaceAttributeWithContext(aws.Context, *ec2.ModifyNetworkInterfaceAttributeInput, ...request.Option) (*ec2.ModifyNetworkInterfaceAttributeOutput, error) + ModifyNetworkInterfaceAttributeRequest(*ec2.ModifyNetworkInterfaceAttributeInput) (*request.Request, *ec2.ModifyNetworkInterfaceAttributeOutput) + + ModifyPrivateDnsNameOptions(*ec2.ModifyPrivateDnsNameOptionsInput) (*ec2.ModifyPrivateDnsNameOptionsOutput, error) + ModifyPrivateDnsNameOptionsWithContext(aws.Context, *ec2.ModifyPrivateDnsNameOptionsInput, ...request.Option) (*ec2.ModifyPrivateDnsNameOptionsOutput, error) + ModifyPrivateDnsNameOptionsRequest(*ec2.ModifyPrivateDnsNameOptionsInput) (*request.Request, *ec2.ModifyPrivateDnsNameOptionsOutput) + + ModifyReservedInstances(*ec2.ModifyReservedInstancesInput) (*ec2.ModifyReservedInstancesOutput, error) + ModifyReservedInstancesWithContext(aws.Context, *ec2.ModifyReservedInstancesInput, ...request.Option) (*ec2.ModifyReservedInstancesOutput, error) + ModifyReservedInstancesRequest(*ec2.ModifyReservedInstancesInput) (*request.Request, *ec2.ModifyReservedInstancesOutput) + + ModifySecurityGroupRules(*ec2.ModifySecurityGroupRulesInput) (*ec2.ModifySecurityGroupRulesOutput, error) + ModifySecurityGroupRulesWithContext(aws.Context, *ec2.ModifySecurityGroupRulesInput, ...request.Option) (*ec2.ModifySecurityGroupRulesOutput, error) + ModifySecurityGroupRulesRequest(*ec2.ModifySecurityGroupRulesInput) (*request.Request, *ec2.ModifySecurityGroupRulesOutput) + + ModifySnapshotAttribute(*ec2.ModifySnapshotAttributeInput) (*ec2.ModifySnapshotAttributeOutput, error) + ModifySnapshotAttributeWithContext(aws.Context, *ec2.ModifySnapshotAttributeInput, ...request.Option) (*ec2.ModifySnapshotAttributeOutput, error) + ModifySnapshotAttributeRequest(*ec2.ModifySnapshotAttributeInput) (*request.Request, *ec2.ModifySnapshotAttributeOutput) + + ModifySnapshotTier(*ec2.ModifySnapshotTierInput) (*ec2.ModifySnapshotTierOutput, error) + ModifySnapshotTierWithContext(aws.Context, *ec2.ModifySnapshotTierInput, ...request.Option) (*ec2.ModifySnapshotTierOutput, error) + ModifySnapshotTierRequest(*ec2.ModifySnapshotTierInput) (*request.Request, *ec2.ModifySnapshotTierOutput) + + ModifySpotFleetRequest(*ec2.ModifySpotFleetRequestInput) (*ec2.ModifySpotFleetRequestOutput, error) + ModifySpotFleetRequestWithContext(aws.Context, *ec2.ModifySpotFleetRequestInput, ...request.Option) (*ec2.ModifySpotFleetRequestOutput, error) + 
ModifySpotFleetRequestRequest(*ec2.ModifySpotFleetRequestInput) (*request.Request, *ec2.ModifySpotFleetRequestOutput) + + ModifySubnetAttribute(*ec2.ModifySubnetAttributeInput) (*ec2.ModifySubnetAttributeOutput, error) + ModifySubnetAttributeWithContext(aws.Context, *ec2.ModifySubnetAttributeInput, ...request.Option) (*ec2.ModifySubnetAttributeOutput, error) + ModifySubnetAttributeRequest(*ec2.ModifySubnetAttributeInput) (*request.Request, *ec2.ModifySubnetAttributeOutput) + + ModifyTrafficMirrorFilterNetworkServices(*ec2.ModifyTrafficMirrorFilterNetworkServicesInput) (*ec2.ModifyTrafficMirrorFilterNetworkServicesOutput, error) + ModifyTrafficMirrorFilterNetworkServicesWithContext(aws.Context, *ec2.ModifyTrafficMirrorFilterNetworkServicesInput, ...request.Option) (*ec2.ModifyTrafficMirrorFilterNetworkServicesOutput, error) + ModifyTrafficMirrorFilterNetworkServicesRequest(*ec2.ModifyTrafficMirrorFilterNetworkServicesInput) (*request.Request, *ec2.ModifyTrafficMirrorFilterNetworkServicesOutput) + + ModifyTrafficMirrorFilterRule(*ec2.ModifyTrafficMirrorFilterRuleInput) (*ec2.ModifyTrafficMirrorFilterRuleOutput, error) + ModifyTrafficMirrorFilterRuleWithContext(aws.Context, *ec2.ModifyTrafficMirrorFilterRuleInput, ...request.Option) (*ec2.ModifyTrafficMirrorFilterRuleOutput, error) + ModifyTrafficMirrorFilterRuleRequest(*ec2.ModifyTrafficMirrorFilterRuleInput) (*request.Request, *ec2.ModifyTrafficMirrorFilterRuleOutput) + + ModifyTrafficMirrorSession(*ec2.ModifyTrafficMirrorSessionInput) (*ec2.ModifyTrafficMirrorSessionOutput, error) + ModifyTrafficMirrorSessionWithContext(aws.Context, *ec2.ModifyTrafficMirrorSessionInput, ...request.Option) (*ec2.ModifyTrafficMirrorSessionOutput, error) + ModifyTrafficMirrorSessionRequest(*ec2.ModifyTrafficMirrorSessionInput) (*request.Request, *ec2.ModifyTrafficMirrorSessionOutput) + + ModifyTransitGateway(*ec2.ModifyTransitGatewayInput) (*ec2.ModifyTransitGatewayOutput, error) + ModifyTransitGatewayWithContext(aws.Context, *ec2.ModifyTransitGatewayInput, ...request.Option) (*ec2.ModifyTransitGatewayOutput, error) + ModifyTransitGatewayRequest(*ec2.ModifyTransitGatewayInput) (*request.Request, *ec2.ModifyTransitGatewayOutput) + + ModifyTransitGatewayPrefixListReference(*ec2.ModifyTransitGatewayPrefixListReferenceInput) (*ec2.ModifyTransitGatewayPrefixListReferenceOutput, error) + ModifyTransitGatewayPrefixListReferenceWithContext(aws.Context, *ec2.ModifyTransitGatewayPrefixListReferenceInput, ...request.Option) (*ec2.ModifyTransitGatewayPrefixListReferenceOutput, error) + ModifyTransitGatewayPrefixListReferenceRequest(*ec2.ModifyTransitGatewayPrefixListReferenceInput) (*request.Request, *ec2.ModifyTransitGatewayPrefixListReferenceOutput) + + ModifyTransitGatewayVpcAttachment(*ec2.ModifyTransitGatewayVpcAttachmentInput) (*ec2.ModifyTransitGatewayVpcAttachmentOutput, error) + ModifyTransitGatewayVpcAttachmentWithContext(aws.Context, *ec2.ModifyTransitGatewayVpcAttachmentInput, ...request.Option) (*ec2.ModifyTransitGatewayVpcAttachmentOutput, error) + ModifyTransitGatewayVpcAttachmentRequest(*ec2.ModifyTransitGatewayVpcAttachmentInput) (*request.Request, *ec2.ModifyTransitGatewayVpcAttachmentOutput) + + ModifyVerifiedAccessEndpoint(*ec2.ModifyVerifiedAccessEndpointInput) (*ec2.ModifyVerifiedAccessEndpointOutput, error) + ModifyVerifiedAccessEndpointWithContext(aws.Context, *ec2.ModifyVerifiedAccessEndpointInput, ...request.Option) (*ec2.ModifyVerifiedAccessEndpointOutput, error) + ModifyVerifiedAccessEndpointRequest(*ec2.ModifyVerifiedAccessEndpointInput) 
(*request.Request, *ec2.ModifyVerifiedAccessEndpointOutput) + + ModifyVerifiedAccessEndpointPolicy(*ec2.ModifyVerifiedAccessEndpointPolicyInput) (*ec2.ModifyVerifiedAccessEndpointPolicyOutput, error) + ModifyVerifiedAccessEndpointPolicyWithContext(aws.Context, *ec2.ModifyVerifiedAccessEndpointPolicyInput, ...request.Option) (*ec2.ModifyVerifiedAccessEndpointPolicyOutput, error) + ModifyVerifiedAccessEndpointPolicyRequest(*ec2.ModifyVerifiedAccessEndpointPolicyInput) (*request.Request, *ec2.ModifyVerifiedAccessEndpointPolicyOutput) + + ModifyVerifiedAccessGroup(*ec2.ModifyVerifiedAccessGroupInput) (*ec2.ModifyVerifiedAccessGroupOutput, error) + ModifyVerifiedAccessGroupWithContext(aws.Context, *ec2.ModifyVerifiedAccessGroupInput, ...request.Option) (*ec2.ModifyVerifiedAccessGroupOutput, error) + ModifyVerifiedAccessGroupRequest(*ec2.ModifyVerifiedAccessGroupInput) (*request.Request, *ec2.ModifyVerifiedAccessGroupOutput) + + ModifyVerifiedAccessGroupPolicy(*ec2.ModifyVerifiedAccessGroupPolicyInput) (*ec2.ModifyVerifiedAccessGroupPolicyOutput, error) + ModifyVerifiedAccessGroupPolicyWithContext(aws.Context, *ec2.ModifyVerifiedAccessGroupPolicyInput, ...request.Option) (*ec2.ModifyVerifiedAccessGroupPolicyOutput, error) + ModifyVerifiedAccessGroupPolicyRequest(*ec2.ModifyVerifiedAccessGroupPolicyInput) (*request.Request, *ec2.ModifyVerifiedAccessGroupPolicyOutput) + + ModifyVerifiedAccessInstance(*ec2.ModifyVerifiedAccessInstanceInput) (*ec2.ModifyVerifiedAccessInstanceOutput, error) + ModifyVerifiedAccessInstanceWithContext(aws.Context, *ec2.ModifyVerifiedAccessInstanceInput, ...request.Option) (*ec2.ModifyVerifiedAccessInstanceOutput, error) + ModifyVerifiedAccessInstanceRequest(*ec2.ModifyVerifiedAccessInstanceInput) (*request.Request, *ec2.ModifyVerifiedAccessInstanceOutput) + + ModifyVerifiedAccessInstanceLoggingConfiguration(*ec2.ModifyVerifiedAccessInstanceLoggingConfigurationInput) (*ec2.ModifyVerifiedAccessInstanceLoggingConfigurationOutput, error) + ModifyVerifiedAccessInstanceLoggingConfigurationWithContext(aws.Context, *ec2.ModifyVerifiedAccessInstanceLoggingConfigurationInput, ...request.Option) (*ec2.ModifyVerifiedAccessInstanceLoggingConfigurationOutput, error) + ModifyVerifiedAccessInstanceLoggingConfigurationRequest(*ec2.ModifyVerifiedAccessInstanceLoggingConfigurationInput) (*request.Request, *ec2.ModifyVerifiedAccessInstanceLoggingConfigurationOutput) + + ModifyVerifiedAccessTrustProvider(*ec2.ModifyVerifiedAccessTrustProviderInput) (*ec2.ModifyVerifiedAccessTrustProviderOutput, error) + ModifyVerifiedAccessTrustProviderWithContext(aws.Context, *ec2.ModifyVerifiedAccessTrustProviderInput, ...request.Option) (*ec2.ModifyVerifiedAccessTrustProviderOutput, error) + ModifyVerifiedAccessTrustProviderRequest(*ec2.ModifyVerifiedAccessTrustProviderInput) (*request.Request, *ec2.ModifyVerifiedAccessTrustProviderOutput) + + ModifyVolume(*ec2.ModifyVolumeInput) (*ec2.ModifyVolumeOutput, error) + ModifyVolumeWithContext(aws.Context, *ec2.ModifyVolumeInput, ...request.Option) (*ec2.ModifyVolumeOutput, error) + ModifyVolumeRequest(*ec2.ModifyVolumeInput) (*request.Request, *ec2.ModifyVolumeOutput) + + ModifyVolumeAttribute(*ec2.ModifyVolumeAttributeInput) (*ec2.ModifyVolumeAttributeOutput, error) + ModifyVolumeAttributeWithContext(aws.Context, *ec2.ModifyVolumeAttributeInput, ...request.Option) (*ec2.ModifyVolumeAttributeOutput, error) + ModifyVolumeAttributeRequest(*ec2.ModifyVolumeAttributeInput) (*request.Request, *ec2.ModifyVolumeAttributeOutput) + + 
ModifyVpcAttribute(*ec2.ModifyVpcAttributeInput) (*ec2.ModifyVpcAttributeOutput, error) + ModifyVpcAttributeWithContext(aws.Context, *ec2.ModifyVpcAttributeInput, ...request.Option) (*ec2.ModifyVpcAttributeOutput, error) + ModifyVpcAttributeRequest(*ec2.ModifyVpcAttributeInput) (*request.Request, *ec2.ModifyVpcAttributeOutput) + + ModifyVpcEndpoint(*ec2.ModifyVpcEndpointInput) (*ec2.ModifyVpcEndpointOutput, error) + ModifyVpcEndpointWithContext(aws.Context, *ec2.ModifyVpcEndpointInput, ...request.Option) (*ec2.ModifyVpcEndpointOutput, error) + ModifyVpcEndpointRequest(*ec2.ModifyVpcEndpointInput) (*request.Request, *ec2.ModifyVpcEndpointOutput) + + ModifyVpcEndpointConnectionNotification(*ec2.ModifyVpcEndpointConnectionNotificationInput) (*ec2.ModifyVpcEndpointConnectionNotificationOutput, error) + ModifyVpcEndpointConnectionNotificationWithContext(aws.Context, *ec2.ModifyVpcEndpointConnectionNotificationInput, ...request.Option) (*ec2.ModifyVpcEndpointConnectionNotificationOutput, error) + ModifyVpcEndpointConnectionNotificationRequest(*ec2.ModifyVpcEndpointConnectionNotificationInput) (*request.Request, *ec2.ModifyVpcEndpointConnectionNotificationOutput) + + ModifyVpcEndpointServiceConfiguration(*ec2.ModifyVpcEndpointServiceConfigurationInput) (*ec2.ModifyVpcEndpointServiceConfigurationOutput, error) + ModifyVpcEndpointServiceConfigurationWithContext(aws.Context, *ec2.ModifyVpcEndpointServiceConfigurationInput, ...request.Option) (*ec2.ModifyVpcEndpointServiceConfigurationOutput, error) + ModifyVpcEndpointServiceConfigurationRequest(*ec2.ModifyVpcEndpointServiceConfigurationInput) (*request.Request, *ec2.ModifyVpcEndpointServiceConfigurationOutput) + + ModifyVpcEndpointServicePayerResponsibility(*ec2.ModifyVpcEndpointServicePayerResponsibilityInput) (*ec2.ModifyVpcEndpointServicePayerResponsibilityOutput, error) + ModifyVpcEndpointServicePayerResponsibilityWithContext(aws.Context, *ec2.ModifyVpcEndpointServicePayerResponsibilityInput, ...request.Option) (*ec2.ModifyVpcEndpointServicePayerResponsibilityOutput, error) + ModifyVpcEndpointServicePayerResponsibilityRequest(*ec2.ModifyVpcEndpointServicePayerResponsibilityInput) (*request.Request, *ec2.ModifyVpcEndpointServicePayerResponsibilityOutput) + + ModifyVpcEndpointServicePermissions(*ec2.ModifyVpcEndpointServicePermissionsInput) (*ec2.ModifyVpcEndpointServicePermissionsOutput, error) + ModifyVpcEndpointServicePermissionsWithContext(aws.Context, *ec2.ModifyVpcEndpointServicePermissionsInput, ...request.Option) (*ec2.ModifyVpcEndpointServicePermissionsOutput, error) + ModifyVpcEndpointServicePermissionsRequest(*ec2.ModifyVpcEndpointServicePermissionsInput) (*request.Request, *ec2.ModifyVpcEndpointServicePermissionsOutput) + + ModifyVpcPeeringConnectionOptions(*ec2.ModifyVpcPeeringConnectionOptionsInput) (*ec2.ModifyVpcPeeringConnectionOptionsOutput, error) + ModifyVpcPeeringConnectionOptionsWithContext(aws.Context, *ec2.ModifyVpcPeeringConnectionOptionsInput, ...request.Option) (*ec2.ModifyVpcPeeringConnectionOptionsOutput, error) + ModifyVpcPeeringConnectionOptionsRequest(*ec2.ModifyVpcPeeringConnectionOptionsInput) (*request.Request, *ec2.ModifyVpcPeeringConnectionOptionsOutput) + + ModifyVpcTenancy(*ec2.ModifyVpcTenancyInput) (*ec2.ModifyVpcTenancyOutput, error) + ModifyVpcTenancyWithContext(aws.Context, *ec2.ModifyVpcTenancyInput, ...request.Option) (*ec2.ModifyVpcTenancyOutput, error) + ModifyVpcTenancyRequest(*ec2.ModifyVpcTenancyInput) (*request.Request, *ec2.ModifyVpcTenancyOutput) + + 
ModifyVpnConnection(*ec2.ModifyVpnConnectionInput) (*ec2.ModifyVpnConnectionOutput, error) + ModifyVpnConnectionWithContext(aws.Context, *ec2.ModifyVpnConnectionInput, ...request.Option) (*ec2.ModifyVpnConnectionOutput, error) + ModifyVpnConnectionRequest(*ec2.ModifyVpnConnectionInput) (*request.Request, *ec2.ModifyVpnConnectionOutput) + + ModifyVpnConnectionOptions(*ec2.ModifyVpnConnectionOptionsInput) (*ec2.ModifyVpnConnectionOptionsOutput, error) + ModifyVpnConnectionOptionsWithContext(aws.Context, *ec2.ModifyVpnConnectionOptionsInput, ...request.Option) (*ec2.ModifyVpnConnectionOptionsOutput, error) + ModifyVpnConnectionOptionsRequest(*ec2.ModifyVpnConnectionOptionsInput) (*request.Request, *ec2.ModifyVpnConnectionOptionsOutput) + + ModifyVpnTunnelCertificate(*ec2.ModifyVpnTunnelCertificateInput) (*ec2.ModifyVpnTunnelCertificateOutput, error) + ModifyVpnTunnelCertificateWithContext(aws.Context, *ec2.ModifyVpnTunnelCertificateInput, ...request.Option) (*ec2.ModifyVpnTunnelCertificateOutput, error) + ModifyVpnTunnelCertificateRequest(*ec2.ModifyVpnTunnelCertificateInput) (*request.Request, *ec2.ModifyVpnTunnelCertificateOutput) + + ModifyVpnTunnelOptions(*ec2.ModifyVpnTunnelOptionsInput) (*ec2.ModifyVpnTunnelOptionsOutput, error) + ModifyVpnTunnelOptionsWithContext(aws.Context, *ec2.ModifyVpnTunnelOptionsInput, ...request.Option) (*ec2.ModifyVpnTunnelOptionsOutput, error) + ModifyVpnTunnelOptionsRequest(*ec2.ModifyVpnTunnelOptionsInput) (*request.Request, *ec2.ModifyVpnTunnelOptionsOutput) + + MonitorInstances(*ec2.MonitorInstancesInput) (*ec2.MonitorInstancesOutput, error) + MonitorInstancesWithContext(aws.Context, *ec2.MonitorInstancesInput, ...request.Option) (*ec2.MonitorInstancesOutput, error) + MonitorInstancesRequest(*ec2.MonitorInstancesInput) (*request.Request, *ec2.MonitorInstancesOutput) + + MoveAddressToVpc(*ec2.MoveAddressToVpcInput) (*ec2.MoveAddressToVpcOutput, error) + MoveAddressToVpcWithContext(aws.Context, *ec2.MoveAddressToVpcInput, ...request.Option) (*ec2.MoveAddressToVpcOutput, error) + MoveAddressToVpcRequest(*ec2.MoveAddressToVpcInput) (*request.Request, *ec2.MoveAddressToVpcOutput) + + MoveByoipCidrToIpam(*ec2.MoveByoipCidrToIpamInput) (*ec2.MoveByoipCidrToIpamOutput, error) + MoveByoipCidrToIpamWithContext(aws.Context, *ec2.MoveByoipCidrToIpamInput, ...request.Option) (*ec2.MoveByoipCidrToIpamOutput, error) + MoveByoipCidrToIpamRequest(*ec2.MoveByoipCidrToIpamInput) (*request.Request, *ec2.MoveByoipCidrToIpamOutput) + + ProvisionByoipCidr(*ec2.ProvisionByoipCidrInput) (*ec2.ProvisionByoipCidrOutput, error) + ProvisionByoipCidrWithContext(aws.Context, *ec2.ProvisionByoipCidrInput, ...request.Option) (*ec2.ProvisionByoipCidrOutput, error) + ProvisionByoipCidrRequest(*ec2.ProvisionByoipCidrInput) (*request.Request, *ec2.ProvisionByoipCidrOutput) + + ProvisionIpamByoasn(*ec2.ProvisionIpamByoasnInput) (*ec2.ProvisionIpamByoasnOutput, error) + ProvisionIpamByoasnWithContext(aws.Context, *ec2.ProvisionIpamByoasnInput, ...request.Option) (*ec2.ProvisionIpamByoasnOutput, error) + ProvisionIpamByoasnRequest(*ec2.ProvisionIpamByoasnInput) (*request.Request, *ec2.ProvisionIpamByoasnOutput) + + ProvisionIpamPoolCidr(*ec2.ProvisionIpamPoolCidrInput) (*ec2.ProvisionIpamPoolCidrOutput, error) + ProvisionIpamPoolCidrWithContext(aws.Context, *ec2.ProvisionIpamPoolCidrInput, ...request.Option) (*ec2.ProvisionIpamPoolCidrOutput, error) + ProvisionIpamPoolCidrRequest(*ec2.ProvisionIpamPoolCidrInput) (*request.Request, *ec2.ProvisionIpamPoolCidrOutput) + + 
ProvisionPublicIpv4PoolCidr(*ec2.ProvisionPublicIpv4PoolCidrInput) (*ec2.ProvisionPublicIpv4PoolCidrOutput, error) + ProvisionPublicIpv4PoolCidrWithContext(aws.Context, *ec2.ProvisionPublicIpv4PoolCidrInput, ...request.Option) (*ec2.ProvisionPublicIpv4PoolCidrOutput, error) + ProvisionPublicIpv4PoolCidrRequest(*ec2.ProvisionPublicIpv4PoolCidrInput) (*request.Request, *ec2.ProvisionPublicIpv4PoolCidrOutput) + + PurchaseCapacityBlock(*ec2.PurchaseCapacityBlockInput) (*ec2.PurchaseCapacityBlockOutput, error) + PurchaseCapacityBlockWithContext(aws.Context, *ec2.PurchaseCapacityBlockInput, ...request.Option) (*ec2.PurchaseCapacityBlockOutput, error) + PurchaseCapacityBlockRequest(*ec2.PurchaseCapacityBlockInput) (*request.Request, *ec2.PurchaseCapacityBlockOutput) + + PurchaseHostReservation(*ec2.PurchaseHostReservationInput) (*ec2.PurchaseHostReservationOutput, error) + PurchaseHostReservationWithContext(aws.Context, *ec2.PurchaseHostReservationInput, ...request.Option) (*ec2.PurchaseHostReservationOutput, error) + PurchaseHostReservationRequest(*ec2.PurchaseHostReservationInput) (*request.Request, *ec2.PurchaseHostReservationOutput) + + PurchaseReservedInstancesOffering(*ec2.PurchaseReservedInstancesOfferingInput) (*ec2.PurchaseReservedInstancesOfferingOutput, error) + PurchaseReservedInstancesOfferingWithContext(aws.Context, *ec2.PurchaseReservedInstancesOfferingInput, ...request.Option) (*ec2.PurchaseReservedInstancesOfferingOutput, error) + PurchaseReservedInstancesOfferingRequest(*ec2.PurchaseReservedInstancesOfferingInput) (*request.Request, *ec2.PurchaseReservedInstancesOfferingOutput) + + PurchaseScheduledInstances(*ec2.PurchaseScheduledInstancesInput) (*ec2.PurchaseScheduledInstancesOutput, error) + PurchaseScheduledInstancesWithContext(aws.Context, *ec2.PurchaseScheduledInstancesInput, ...request.Option) (*ec2.PurchaseScheduledInstancesOutput, error) + PurchaseScheduledInstancesRequest(*ec2.PurchaseScheduledInstancesInput) (*request.Request, *ec2.PurchaseScheduledInstancesOutput) + + RebootInstances(*ec2.RebootInstancesInput) (*ec2.RebootInstancesOutput, error) + RebootInstancesWithContext(aws.Context, *ec2.RebootInstancesInput, ...request.Option) (*ec2.RebootInstancesOutput, error) + RebootInstancesRequest(*ec2.RebootInstancesInput) (*request.Request, *ec2.RebootInstancesOutput) + + RegisterImage(*ec2.RegisterImageInput) (*ec2.RegisterImageOutput, error) + RegisterImageWithContext(aws.Context, *ec2.RegisterImageInput, ...request.Option) (*ec2.RegisterImageOutput, error) + RegisterImageRequest(*ec2.RegisterImageInput) (*request.Request, *ec2.RegisterImageOutput) + + RegisterInstanceEventNotificationAttributes(*ec2.RegisterInstanceEventNotificationAttributesInput) (*ec2.RegisterInstanceEventNotificationAttributesOutput, error) + RegisterInstanceEventNotificationAttributesWithContext(aws.Context, *ec2.RegisterInstanceEventNotificationAttributesInput, ...request.Option) (*ec2.RegisterInstanceEventNotificationAttributesOutput, error) + RegisterInstanceEventNotificationAttributesRequest(*ec2.RegisterInstanceEventNotificationAttributesInput) (*request.Request, *ec2.RegisterInstanceEventNotificationAttributesOutput) + + RegisterTransitGatewayMulticastGroupMembers(*ec2.RegisterTransitGatewayMulticastGroupMembersInput) (*ec2.RegisterTransitGatewayMulticastGroupMembersOutput, error) + RegisterTransitGatewayMulticastGroupMembersWithContext(aws.Context, *ec2.RegisterTransitGatewayMulticastGroupMembersInput, ...request.Option) (*ec2.RegisterTransitGatewayMulticastGroupMembersOutput, error) + 
RegisterTransitGatewayMulticastGroupMembersRequest(*ec2.RegisterTransitGatewayMulticastGroupMembersInput) (*request.Request, *ec2.RegisterTransitGatewayMulticastGroupMembersOutput) + + RegisterTransitGatewayMulticastGroupSources(*ec2.RegisterTransitGatewayMulticastGroupSourcesInput) (*ec2.RegisterTransitGatewayMulticastGroupSourcesOutput, error) + RegisterTransitGatewayMulticastGroupSourcesWithContext(aws.Context, *ec2.RegisterTransitGatewayMulticastGroupSourcesInput, ...request.Option) (*ec2.RegisterTransitGatewayMulticastGroupSourcesOutput, error) + RegisterTransitGatewayMulticastGroupSourcesRequest(*ec2.RegisterTransitGatewayMulticastGroupSourcesInput) (*request.Request, *ec2.RegisterTransitGatewayMulticastGroupSourcesOutput) + + RejectTransitGatewayMulticastDomainAssociations(*ec2.RejectTransitGatewayMulticastDomainAssociationsInput) (*ec2.RejectTransitGatewayMulticastDomainAssociationsOutput, error) + RejectTransitGatewayMulticastDomainAssociationsWithContext(aws.Context, *ec2.RejectTransitGatewayMulticastDomainAssociationsInput, ...request.Option) (*ec2.RejectTransitGatewayMulticastDomainAssociationsOutput, error) + RejectTransitGatewayMulticastDomainAssociationsRequest(*ec2.RejectTransitGatewayMulticastDomainAssociationsInput) (*request.Request, *ec2.RejectTransitGatewayMulticastDomainAssociationsOutput) + + RejectTransitGatewayPeeringAttachment(*ec2.RejectTransitGatewayPeeringAttachmentInput) (*ec2.RejectTransitGatewayPeeringAttachmentOutput, error) + RejectTransitGatewayPeeringAttachmentWithContext(aws.Context, *ec2.RejectTransitGatewayPeeringAttachmentInput, ...request.Option) (*ec2.RejectTransitGatewayPeeringAttachmentOutput, error) + RejectTransitGatewayPeeringAttachmentRequest(*ec2.RejectTransitGatewayPeeringAttachmentInput) (*request.Request, *ec2.RejectTransitGatewayPeeringAttachmentOutput) + + RejectTransitGatewayVpcAttachment(*ec2.RejectTransitGatewayVpcAttachmentInput) (*ec2.RejectTransitGatewayVpcAttachmentOutput, error) + RejectTransitGatewayVpcAttachmentWithContext(aws.Context, *ec2.RejectTransitGatewayVpcAttachmentInput, ...request.Option) (*ec2.RejectTransitGatewayVpcAttachmentOutput, error) + RejectTransitGatewayVpcAttachmentRequest(*ec2.RejectTransitGatewayVpcAttachmentInput) (*request.Request, *ec2.RejectTransitGatewayVpcAttachmentOutput) + + RejectVpcEndpointConnections(*ec2.RejectVpcEndpointConnectionsInput) (*ec2.RejectVpcEndpointConnectionsOutput, error) + RejectVpcEndpointConnectionsWithContext(aws.Context, *ec2.RejectVpcEndpointConnectionsInput, ...request.Option) (*ec2.RejectVpcEndpointConnectionsOutput, error) + RejectVpcEndpointConnectionsRequest(*ec2.RejectVpcEndpointConnectionsInput) (*request.Request, *ec2.RejectVpcEndpointConnectionsOutput) + + RejectVpcPeeringConnection(*ec2.RejectVpcPeeringConnectionInput) (*ec2.RejectVpcPeeringConnectionOutput, error) + RejectVpcPeeringConnectionWithContext(aws.Context, *ec2.RejectVpcPeeringConnectionInput, ...request.Option) (*ec2.RejectVpcPeeringConnectionOutput, error) + RejectVpcPeeringConnectionRequest(*ec2.RejectVpcPeeringConnectionInput) (*request.Request, *ec2.RejectVpcPeeringConnectionOutput) + + ReleaseAddress(*ec2.ReleaseAddressInput) (*ec2.ReleaseAddressOutput, error) + ReleaseAddressWithContext(aws.Context, *ec2.ReleaseAddressInput, ...request.Option) (*ec2.ReleaseAddressOutput, error) + ReleaseAddressRequest(*ec2.ReleaseAddressInput) (*request.Request, *ec2.ReleaseAddressOutput) + + ReleaseHosts(*ec2.ReleaseHostsInput) (*ec2.ReleaseHostsOutput, error) + ReleaseHostsWithContext(aws.Context, 
*ec2.ReleaseHostsInput, ...request.Option) (*ec2.ReleaseHostsOutput, error) + ReleaseHostsRequest(*ec2.ReleaseHostsInput) (*request.Request, *ec2.ReleaseHostsOutput) + + ReleaseIpamPoolAllocation(*ec2.ReleaseIpamPoolAllocationInput) (*ec2.ReleaseIpamPoolAllocationOutput, error) + ReleaseIpamPoolAllocationWithContext(aws.Context, *ec2.ReleaseIpamPoolAllocationInput, ...request.Option) (*ec2.ReleaseIpamPoolAllocationOutput, error) + ReleaseIpamPoolAllocationRequest(*ec2.ReleaseIpamPoolAllocationInput) (*request.Request, *ec2.ReleaseIpamPoolAllocationOutput) + + ReplaceIamInstanceProfileAssociation(*ec2.ReplaceIamInstanceProfileAssociationInput) (*ec2.ReplaceIamInstanceProfileAssociationOutput, error) + ReplaceIamInstanceProfileAssociationWithContext(aws.Context, *ec2.ReplaceIamInstanceProfileAssociationInput, ...request.Option) (*ec2.ReplaceIamInstanceProfileAssociationOutput, error) + ReplaceIamInstanceProfileAssociationRequest(*ec2.ReplaceIamInstanceProfileAssociationInput) (*request.Request, *ec2.ReplaceIamInstanceProfileAssociationOutput) + + ReplaceNetworkAclAssociation(*ec2.ReplaceNetworkAclAssociationInput) (*ec2.ReplaceNetworkAclAssociationOutput, error) + ReplaceNetworkAclAssociationWithContext(aws.Context, *ec2.ReplaceNetworkAclAssociationInput, ...request.Option) (*ec2.ReplaceNetworkAclAssociationOutput, error) + ReplaceNetworkAclAssociationRequest(*ec2.ReplaceNetworkAclAssociationInput) (*request.Request, *ec2.ReplaceNetworkAclAssociationOutput) + + ReplaceNetworkAclEntry(*ec2.ReplaceNetworkAclEntryInput) (*ec2.ReplaceNetworkAclEntryOutput, error) + ReplaceNetworkAclEntryWithContext(aws.Context, *ec2.ReplaceNetworkAclEntryInput, ...request.Option) (*ec2.ReplaceNetworkAclEntryOutput, error) + ReplaceNetworkAclEntryRequest(*ec2.ReplaceNetworkAclEntryInput) (*request.Request, *ec2.ReplaceNetworkAclEntryOutput) + + ReplaceRoute(*ec2.ReplaceRouteInput) (*ec2.ReplaceRouteOutput, error) + ReplaceRouteWithContext(aws.Context, *ec2.ReplaceRouteInput, ...request.Option) (*ec2.ReplaceRouteOutput, error) + ReplaceRouteRequest(*ec2.ReplaceRouteInput) (*request.Request, *ec2.ReplaceRouteOutput) + + ReplaceRouteTableAssociation(*ec2.ReplaceRouteTableAssociationInput) (*ec2.ReplaceRouteTableAssociationOutput, error) + ReplaceRouteTableAssociationWithContext(aws.Context, *ec2.ReplaceRouteTableAssociationInput, ...request.Option) (*ec2.ReplaceRouteTableAssociationOutput, error) + ReplaceRouteTableAssociationRequest(*ec2.ReplaceRouteTableAssociationInput) (*request.Request, *ec2.ReplaceRouteTableAssociationOutput) + + ReplaceTransitGatewayRoute(*ec2.ReplaceTransitGatewayRouteInput) (*ec2.ReplaceTransitGatewayRouteOutput, error) + ReplaceTransitGatewayRouteWithContext(aws.Context, *ec2.ReplaceTransitGatewayRouteInput, ...request.Option) (*ec2.ReplaceTransitGatewayRouteOutput, error) + ReplaceTransitGatewayRouteRequest(*ec2.ReplaceTransitGatewayRouteInput) (*request.Request, *ec2.ReplaceTransitGatewayRouteOutput) + + ReplaceVpnTunnel(*ec2.ReplaceVpnTunnelInput) (*ec2.ReplaceVpnTunnelOutput, error) + ReplaceVpnTunnelWithContext(aws.Context, *ec2.ReplaceVpnTunnelInput, ...request.Option) (*ec2.ReplaceVpnTunnelOutput, error) + ReplaceVpnTunnelRequest(*ec2.ReplaceVpnTunnelInput) (*request.Request, *ec2.ReplaceVpnTunnelOutput) + + ReportInstanceStatus(*ec2.ReportInstanceStatusInput) (*ec2.ReportInstanceStatusOutput, error) + ReportInstanceStatusWithContext(aws.Context, *ec2.ReportInstanceStatusInput, ...request.Option) (*ec2.ReportInstanceStatusOutput, error) + 
ReportInstanceStatusRequest(*ec2.ReportInstanceStatusInput) (*request.Request, *ec2.ReportInstanceStatusOutput) + + RequestSpotFleet(*ec2.RequestSpotFleetInput) (*ec2.RequestSpotFleetOutput, error) + RequestSpotFleetWithContext(aws.Context, *ec2.RequestSpotFleetInput, ...request.Option) (*ec2.RequestSpotFleetOutput, error) + RequestSpotFleetRequest(*ec2.RequestSpotFleetInput) (*request.Request, *ec2.RequestSpotFleetOutput) + + RequestSpotInstances(*ec2.RequestSpotInstancesInput) (*ec2.RequestSpotInstancesOutput, error) + RequestSpotInstancesWithContext(aws.Context, *ec2.RequestSpotInstancesInput, ...request.Option) (*ec2.RequestSpotInstancesOutput, error) + RequestSpotInstancesRequest(*ec2.RequestSpotInstancesInput) (*request.Request, *ec2.RequestSpotInstancesOutput) + + ResetAddressAttribute(*ec2.ResetAddressAttributeInput) (*ec2.ResetAddressAttributeOutput, error) + ResetAddressAttributeWithContext(aws.Context, *ec2.ResetAddressAttributeInput, ...request.Option) (*ec2.ResetAddressAttributeOutput, error) + ResetAddressAttributeRequest(*ec2.ResetAddressAttributeInput) (*request.Request, *ec2.ResetAddressAttributeOutput) + + ResetEbsDefaultKmsKeyId(*ec2.ResetEbsDefaultKmsKeyIdInput) (*ec2.ResetEbsDefaultKmsKeyIdOutput, error) + ResetEbsDefaultKmsKeyIdWithContext(aws.Context, *ec2.ResetEbsDefaultKmsKeyIdInput, ...request.Option) (*ec2.ResetEbsDefaultKmsKeyIdOutput, error) + ResetEbsDefaultKmsKeyIdRequest(*ec2.ResetEbsDefaultKmsKeyIdInput) (*request.Request, *ec2.ResetEbsDefaultKmsKeyIdOutput) + + ResetFpgaImageAttribute(*ec2.ResetFpgaImageAttributeInput) (*ec2.ResetFpgaImageAttributeOutput, error) + ResetFpgaImageAttributeWithContext(aws.Context, *ec2.ResetFpgaImageAttributeInput, ...request.Option) (*ec2.ResetFpgaImageAttributeOutput, error) + ResetFpgaImageAttributeRequest(*ec2.ResetFpgaImageAttributeInput) (*request.Request, *ec2.ResetFpgaImageAttributeOutput) + + ResetImageAttribute(*ec2.ResetImageAttributeInput) (*ec2.ResetImageAttributeOutput, error) + ResetImageAttributeWithContext(aws.Context, *ec2.ResetImageAttributeInput, ...request.Option) (*ec2.ResetImageAttributeOutput, error) + ResetImageAttributeRequest(*ec2.ResetImageAttributeInput) (*request.Request, *ec2.ResetImageAttributeOutput) + + ResetInstanceAttribute(*ec2.ResetInstanceAttributeInput) (*ec2.ResetInstanceAttributeOutput, error) + ResetInstanceAttributeWithContext(aws.Context, *ec2.ResetInstanceAttributeInput, ...request.Option) (*ec2.ResetInstanceAttributeOutput, error) + ResetInstanceAttributeRequest(*ec2.ResetInstanceAttributeInput) (*request.Request, *ec2.ResetInstanceAttributeOutput) + + ResetNetworkInterfaceAttribute(*ec2.ResetNetworkInterfaceAttributeInput) (*ec2.ResetNetworkInterfaceAttributeOutput, error) + ResetNetworkInterfaceAttributeWithContext(aws.Context, *ec2.ResetNetworkInterfaceAttributeInput, ...request.Option) (*ec2.ResetNetworkInterfaceAttributeOutput, error) + ResetNetworkInterfaceAttributeRequest(*ec2.ResetNetworkInterfaceAttributeInput) (*request.Request, *ec2.ResetNetworkInterfaceAttributeOutput) + + ResetSnapshotAttribute(*ec2.ResetSnapshotAttributeInput) (*ec2.ResetSnapshotAttributeOutput, error) + ResetSnapshotAttributeWithContext(aws.Context, *ec2.ResetSnapshotAttributeInput, ...request.Option) (*ec2.ResetSnapshotAttributeOutput, error) + ResetSnapshotAttributeRequest(*ec2.ResetSnapshotAttributeInput) (*request.Request, *ec2.ResetSnapshotAttributeOutput) + + RestoreAddressToClassic(*ec2.RestoreAddressToClassicInput) (*ec2.RestoreAddressToClassicOutput, error) + 
RestoreAddressToClassicWithContext(aws.Context, *ec2.RestoreAddressToClassicInput, ...request.Option) (*ec2.RestoreAddressToClassicOutput, error) + RestoreAddressToClassicRequest(*ec2.RestoreAddressToClassicInput) (*request.Request, *ec2.RestoreAddressToClassicOutput) + + RestoreImageFromRecycleBin(*ec2.RestoreImageFromRecycleBinInput) (*ec2.RestoreImageFromRecycleBinOutput, error) + RestoreImageFromRecycleBinWithContext(aws.Context, *ec2.RestoreImageFromRecycleBinInput, ...request.Option) (*ec2.RestoreImageFromRecycleBinOutput, error) + RestoreImageFromRecycleBinRequest(*ec2.RestoreImageFromRecycleBinInput) (*request.Request, *ec2.RestoreImageFromRecycleBinOutput) + + RestoreManagedPrefixListVersion(*ec2.RestoreManagedPrefixListVersionInput) (*ec2.RestoreManagedPrefixListVersionOutput, error) + RestoreManagedPrefixListVersionWithContext(aws.Context, *ec2.RestoreManagedPrefixListVersionInput, ...request.Option) (*ec2.RestoreManagedPrefixListVersionOutput, error) + RestoreManagedPrefixListVersionRequest(*ec2.RestoreManagedPrefixListVersionInput) (*request.Request, *ec2.RestoreManagedPrefixListVersionOutput) + + RestoreSnapshotFromRecycleBin(*ec2.RestoreSnapshotFromRecycleBinInput) (*ec2.RestoreSnapshotFromRecycleBinOutput, error) + RestoreSnapshotFromRecycleBinWithContext(aws.Context, *ec2.RestoreSnapshotFromRecycleBinInput, ...request.Option) (*ec2.RestoreSnapshotFromRecycleBinOutput, error) + RestoreSnapshotFromRecycleBinRequest(*ec2.RestoreSnapshotFromRecycleBinInput) (*request.Request, *ec2.RestoreSnapshotFromRecycleBinOutput) + + RestoreSnapshotTier(*ec2.RestoreSnapshotTierInput) (*ec2.RestoreSnapshotTierOutput, error) + RestoreSnapshotTierWithContext(aws.Context, *ec2.RestoreSnapshotTierInput, ...request.Option) (*ec2.RestoreSnapshotTierOutput, error) + RestoreSnapshotTierRequest(*ec2.RestoreSnapshotTierInput) (*request.Request, *ec2.RestoreSnapshotTierOutput) + + RevokeClientVpnIngress(*ec2.RevokeClientVpnIngressInput) (*ec2.RevokeClientVpnIngressOutput, error) + RevokeClientVpnIngressWithContext(aws.Context, *ec2.RevokeClientVpnIngressInput, ...request.Option) (*ec2.RevokeClientVpnIngressOutput, error) + RevokeClientVpnIngressRequest(*ec2.RevokeClientVpnIngressInput) (*request.Request, *ec2.RevokeClientVpnIngressOutput) + + RevokeSecurityGroupEgress(*ec2.RevokeSecurityGroupEgressInput) (*ec2.RevokeSecurityGroupEgressOutput, error) + RevokeSecurityGroupEgressWithContext(aws.Context, *ec2.RevokeSecurityGroupEgressInput, ...request.Option) (*ec2.RevokeSecurityGroupEgressOutput, error) + RevokeSecurityGroupEgressRequest(*ec2.RevokeSecurityGroupEgressInput) (*request.Request, *ec2.RevokeSecurityGroupEgressOutput) + + RevokeSecurityGroupIngress(*ec2.RevokeSecurityGroupIngressInput) (*ec2.RevokeSecurityGroupIngressOutput, error) + RevokeSecurityGroupIngressWithContext(aws.Context, *ec2.RevokeSecurityGroupIngressInput, ...request.Option) (*ec2.RevokeSecurityGroupIngressOutput, error) + RevokeSecurityGroupIngressRequest(*ec2.RevokeSecurityGroupIngressInput) (*request.Request, *ec2.RevokeSecurityGroupIngressOutput) + + RunInstances(*ec2.RunInstancesInput) (*ec2.Reservation, error) + RunInstancesWithContext(aws.Context, *ec2.RunInstancesInput, ...request.Option) (*ec2.Reservation, error) + RunInstancesRequest(*ec2.RunInstancesInput) (*request.Request, *ec2.Reservation) + + RunScheduledInstances(*ec2.RunScheduledInstancesInput) (*ec2.RunScheduledInstancesOutput, error) + RunScheduledInstancesWithContext(aws.Context, *ec2.RunScheduledInstancesInput, ...request.Option) 
(*ec2.RunScheduledInstancesOutput, error) + RunScheduledInstancesRequest(*ec2.RunScheduledInstancesInput) (*request.Request, *ec2.RunScheduledInstancesOutput) + + SearchLocalGatewayRoutes(*ec2.SearchLocalGatewayRoutesInput) (*ec2.SearchLocalGatewayRoutesOutput, error) + SearchLocalGatewayRoutesWithContext(aws.Context, *ec2.SearchLocalGatewayRoutesInput, ...request.Option) (*ec2.SearchLocalGatewayRoutesOutput, error) + SearchLocalGatewayRoutesRequest(*ec2.SearchLocalGatewayRoutesInput) (*request.Request, *ec2.SearchLocalGatewayRoutesOutput) + + SearchLocalGatewayRoutesPages(*ec2.SearchLocalGatewayRoutesInput, func(*ec2.SearchLocalGatewayRoutesOutput, bool) bool) error + SearchLocalGatewayRoutesPagesWithContext(aws.Context, *ec2.SearchLocalGatewayRoutesInput, func(*ec2.SearchLocalGatewayRoutesOutput, bool) bool, ...request.Option) error + + SearchTransitGatewayMulticastGroups(*ec2.SearchTransitGatewayMulticastGroupsInput) (*ec2.SearchTransitGatewayMulticastGroupsOutput, error) + SearchTransitGatewayMulticastGroupsWithContext(aws.Context, *ec2.SearchTransitGatewayMulticastGroupsInput, ...request.Option) (*ec2.SearchTransitGatewayMulticastGroupsOutput, error) + SearchTransitGatewayMulticastGroupsRequest(*ec2.SearchTransitGatewayMulticastGroupsInput) (*request.Request, *ec2.SearchTransitGatewayMulticastGroupsOutput) + + SearchTransitGatewayMulticastGroupsPages(*ec2.SearchTransitGatewayMulticastGroupsInput, func(*ec2.SearchTransitGatewayMulticastGroupsOutput, bool) bool) error + SearchTransitGatewayMulticastGroupsPagesWithContext(aws.Context, *ec2.SearchTransitGatewayMulticastGroupsInput, func(*ec2.SearchTransitGatewayMulticastGroupsOutput, bool) bool, ...request.Option) error + + SearchTransitGatewayRoutes(*ec2.SearchTransitGatewayRoutesInput) (*ec2.SearchTransitGatewayRoutesOutput, error) + SearchTransitGatewayRoutesWithContext(aws.Context, *ec2.SearchTransitGatewayRoutesInput, ...request.Option) (*ec2.SearchTransitGatewayRoutesOutput, error) + SearchTransitGatewayRoutesRequest(*ec2.SearchTransitGatewayRoutesInput) (*request.Request, *ec2.SearchTransitGatewayRoutesOutput) + + SendDiagnosticInterrupt(*ec2.SendDiagnosticInterruptInput) (*ec2.SendDiagnosticInterruptOutput, error) + SendDiagnosticInterruptWithContext(aws.Context, *ec2.SendDiagnosticInterruptInput, ...request.Option) (*ec2.SendDiagnosticInterruptOutput, error) + SendDiagnosticInterruptRequest(*ec2.SendDiagnosticInterruptInput) (*request.Request, *ec2.SendDiagnosticInterruptOutput) + + StartInstances(*ec2.StartInstancesInput) (*ec2.StartInstancesOutput, error) + StartInstancesWithContext(aws.Context, *ec2.StartInstancesInput, ...request.Option) (*ec2.StartInstancesOutput, error) + StartInstancesRequest(*ec2.StartInstancesInput) (*request.Request, *ec2.StartInstancesOutput) + + StartNetworkInsightsAccessScopeAnalysis(*ec2.StartNetworkInsightsAccessScopeAnalysisInput) (*ec2.StartNetworkInsightsAccessScopeAnalysisOutput, error) + StartNetworkInsightsAccessScopeAnalysisWithContext(aws.Context, *ec2.StartNetworkInsightsAccessScopeAnalysisInput, ...request.Option) (*ec2.StartNetworkInsightsAccessScopeAnalysisOutput, error) + StartNetworkInsightsAccessScopeAnalysisRequest(*ec2.StartNetworkInsightsAccessScopeAnalysisInput) (*request.Request, *ec2.StartNetworkInsightsAccessScopeAnalysisOutput) + + StartNetworkInsightsAnalysis(*ec2.StartNetworkInsightsAnalysisInput) (*ec2.StartNetworkInsightsAnalysisOutput, error) + StartNetworkInsightsAnalysisWithContext(aws.Context, *ec2.StartNetworkInsightsAnalysisInput, ...request.Option) 
(*ec2.StartNetworkInsightsAnalysisOutput, error) + StartNetworkInsightsAnalysisRequest(*ec2.StartNetworkInsightsAnalysisInput) (*request.Request, *ec2.StartNetworkInsightsAnalysisOutput) + + StartVpcEndpointServicePrivateDnsVerification(*ec2.StartVpcEndpointServicePrivateDnsVerificationInput) (*ec2.StartVpcEndpointServicePrivateDnsVerificationOutput, error) + StartVpcEndpointServicePrivateDnsVerificationWithContext(aws.Context, *ec2.StartVpcEndpointServicePrivateDnsVerificationInput, ...request.Option) (*ec2.StartVpcEndpointServicePrivateDnsVerificationOutput, error) + StartVpcEndpointServicePrivateDnsVerificationRequest(*ec2.StartVpcEndpointServicePrivateDnsVerificationInput) (*request.Request, *ec2.StartVpcEndpointServicePrivateDnsVerificationOutput) + + StopInstances(*ec2.StopInstancesInput) (*ec2.StopInstancesOutput, error) + StopInstancesWithContext(aws.Context, *ec2.StopInstancesInput, ...request.Option) (*ec2.StopInstancesOutput, error) + StopInstancesRequest(*ec2.StopInstancesInput) (*request.Request, *ec2.StopInstancesOutput) + + TerminateClientVpnConnections(*ec2.TerminateClientVpnConnectionsInput) (*ec2.TerminateClientVpnConnectionsOutput, error) + TerminateClientVpnConnectionsWithContext(aws.Context, *ec2.TerminateClientVpnConnectionsInput, ...request.Option) (*ec2.TerminateClientVpnConnectionsOutput, error) + TerminateClientVpnConnectionsRequest(*ec2.TerminateClientVpnConnectionsInput) (*request.Request, *ec2.TerminateClientVpnConnectionsOutput) + + TerminateInstances(*ec2.TerminateInstancesInput) (*ec2.TerminateInstancesOutput, error) + TerminateInstancesWithContext(aws.Context, *ec2.TerminateInstancesInput, ...request.Option) (*ec2.TerminateInstancesOutput, error) + TerminateInstancesRequest(*ec2.TerminateInstancesInput) (*request.Request, *ec2.TerminateInstancesOutput) + + UnassignIpv6Addresses(*ec2.UnassignIpv6AddressesInput) (*ec2.UnassignIpv6AddressesOutput, error) + UnassignIpv6AddressesWithContext(aws.Context, *ec2.UnassignIpv6AddressesInput, ...request.Option) (*ec2.UnassignIpv6AddressesOutput, error) + UnassignIpv6AddressesRequest(*ec2.UnassignIpv6AddressesInput) (*request.Request, *ec2.UnassignIpv6AddressesOutput) + + UnassignPrivateIpAddresses(*ec2.UnassignPrivateIpAddressesInput) (*ec2.UnassignPrivateIpAddressesOutput, error) + UnassignPrivateIpAddressesWithContext(aws.Context, *ec2.UnassignPrivateIpAddressesInput, ...request.Option) (*ec2.UnassignPrivateIpAddressesOutput, error) + UnassignPrivateIpAddressesRequest(*ec2.UnassignPrivateIpAddressesInput) (*request.Request, *ec2.UnassignPrivateIpAddressesOutput) + + UnassignPrivateNatGatewayAddress(*ec2.UnassignPrivateNatGatewayAddressInput) (*ec2.UnassignPrivateNatGatewayAddressOutput, error) + UnassignPrivateNatGatewayAddressWithContext(aws.Context, *ec2.UnassignPrivateNatGatewayAddressInput, ...request.Option) (*ec2.UnassignPrivateNatGatewayAddressOutput, error) + UnassignPrivateNatGatewayAddressRequest(*ec2.UnassignPrivateNatGatewayAddressInput) (*request.Request, *ec2.UnassignPrivateNatGatewayAddressOutput) + + UnlockSnapshot(*ec2.UnlockSnapshotInput) (*ec2.UnlockSnapshotOutput, error) + UnlockSnapshotWithContext(aws.Context, *ec2.UnlockSnapshotInput, ...request.Option) (*ec2.UnlockSnapshotOutput, error) + UnlockSnapshotRequest(*ec2.UnlockSnapshotInput) (*request.Request, *ec2.UnlockSnapshotOutput) + + UnmonitorInstances(*ec2.UnmonitorInstancesInput) (*ec2.UnmonitorInstancesOutput, error) + UnmonitorInstancesWithContext(aws.Context, *ec2.UnmonitorInstancesInput, ...request.Option) (*ec2.UnmonitorInstancesOutput, 
error) + UnmonitorInstancesRequest(*ec2.UnmonitorInstancesInput) (*request.Request, *ec2.UnmonitorInstancesOutput) + + UpdateSecurityGroupRuleDescriptionsEgress(*ec2.UpdateSecurityGroupRuleDescriptionsEgressInput) (*ec2.UpdateSecurityGroupRuleDescriptionsEgressOutput, error) + UpdateSecurityGroupRuleDescriptionsEgressWithContext(aws.Context, *ec2.UpdateSecurityGroupRuleDescriptionsEgressInput, ...request.Option) (*ec2.UpdateSecurityGroupRuleDescriptionsEgressOutput, error) + UpdateSecurityGroupRuleDescriptionsEgressRequest(*ec2.UpdateSecurityGroupRuleDescriptionsEgressInput) (*request.Request, *ec2.UpdateSecurityGroupRuleDescriptionsEgressOutput) + + UpdateSecurityGroupRuleDescriptionsIngress(*ec2.UpdateSecurityGroupRuleDescriptionsIngressInput) (*ec2.UpdateSecurityGroupRuleDescriptionsIngressOutput, error) + UpdateSecurityGroupRuleDescriptionsIngressWithContext(aws.Context, *ec2.UpdateSecurityGroupRuleDescriptionsIngressInput, ...request.Option) (*ec2.UpdateSecurityGroupRuleDescriptionsIngressOutput, error) + UpdateSecurityGroupRuleDescriptionsIngressRequest(*ec2.UpdateSecurityGroupRuleDescriptionsIngressInput) (*request.Request, *ec2.UpdateSecurityGroupRuleDescriptionsIngressOutput) + + WithdrawByoipCidr(*ec2.WithdrawByoipCidrInput) (*ec2.WithdrawByoipCidrOutput, error) + WithdrawByoipCidrWithContext(aws.Context, *ec2.WithdrawByoipCidrInput, ...request.Option) (*ec2.WithdrawByoipCidrOutput, error) + WithdrawByoipCidrRequest(*ec2.WithdrawByoipCidrInput) (*request.Request, *ec2.WithdrawByoipCidrOutput) + + WaitUntilBundleTaskComplete(*ec2.DescribeBundleTasksInput) error + WaitUntilBundleTaskCompleteWithContext(aws.Context, *ec2.DescribeBundleTasksInput, ...request.WaiterOption) error + + WaitUntilConversionTaskCancelled(*ec2.DescribeConversionTasksInput) error + WaitUntilConversionTaskCancelledWithContext(aws.Context, *ec2.DescribeConversionTasksInput, ...request.WaiterOption) error + + WaitUntilConversionTaskCompleted(*ec2.DescribeConversionTasksInput) error + WaitUntilConversionTaskCompletedWithContext(aws.Context, *ec2.DescribeConversionTasksInput, ...request.WaiterOption) error + + WaitUntilConversionTaskDeleted(*ec2.DescribeConversionTasksInput) error + WaitUntilConversionTaskDeletedWithContext(aws.Context, *ec2.DescribeConversionTasksInput, ...request.WaiterOption) error + + WaitUntilCustomerGatewayAvailable(*ec2.DescribeCustomerGatewaysInput) error + WaitUntilCustomerGatewayAvailableWithContext(aws.Context, *ec2.DescribeCustomerGatewaysInput, ...request.WaiterOption) error + + WaitUntilExportTaskCancelled(*ec2.DescribeExportTasksInput) error + WaitUntilExportTaskCancelledWithContext(aws.Context, *ec2.DescribeExportTasksInput, ...request.WaiterOption) error + + WaitUntilExportTaskCompleted(*ec2.DescribeExportTasksInput) error + WaitUntilExportTaskCompletedWithContext(aws.Context, *ec2.DescribeExportTasksInput, ...request.WaiterOption) error + + WaitUntilImageAvailable(*ec2.DescribeImagesInput) error + WaitUntilImageAvailableWithContext(aws.Context, *ec2.DescribeImagesInput, ...request.WaiterOption) error + + WaitUntilImageExists(*ec2.DescribeImagesInput) error + WaitUntilImageExistsWithContext(aws.Context, *ec2.DescribeImagesInput, ...request.WaiterOption) error + + WaitUntilInstanceExists(*ec2.DescribeInstancesInput) error + WaitUntilInstanceExistsWithContext(aws.Context, *ec2.DescribeInstancesInput, ...request.WaiterOption) error + + WaitUntilInstanceRunning(*ec2.DescribeInstancesInput) error + WaitUntilInstanceRunningWithContext(aws.Context, *ec2.DescribeInstancesInput, 
...request.WaiterOption) error + + WaitUntilInstanceStatusOk(*ec2.DescribeInstanceStatusInput) error + WaitUntilInstanceStatusOkWithContext(aws.Context, *ec2.DescribeInstanceStatusInput, ...request.WaiterOption) error + + WaitUntilInstanceStopped(*ec2.DescribeInstancesInput) error + WaitUntilInstanceStoppedWithContext(aws.Context, *ec2.DescribeInstancesInput, ...request.WaiterOption) error + + WaitUntilInstanceTerminated(*ec2.DescribeInstancesInput) error + WaitUntilInstanceTerminatedWithContext(aws.Context, *ec2.DescribeInstancesInput, ...request.WaiterOption) error + + WaitUntilInternetGatewayExists(*ec2.DescribeInternetGatewaysInput) error + WaitUntilInternetGatewayExistsWithContext(aws.Context, *ec2.DescribeInternetGatewaysInput, ...request.WaiterOption) error + + WaitUntilKeyPairExists(*ec2.DescribeKeyPairsInput) error + WaitUntilKeyPairExistsWithContext(aws.Context, *ec2.DescribeKeyPairsInput, ...request.WaiterOption) error + + WaitUntilNatGatewayAvailable(*ec2.DescribeNatGatewaysInput) error + WaitUntilNatGatewayAvailableWithContext(aws.Context, *ec2.DescribeNatGatewaysInput, ...request.WaiterOption) error + + WaitUntilNatGatewayDeleted(*ec2.DescribeNatGatewaysInput) error + WaitUntilNatGatewayDeletedWithContext(aws.Context, *ec2.DescribeNatGatewaysInput, ...request.WaiterOption) error + + WaitUntilNetworkInterfaceAvailable(*ec2.DescribeNetworkInterfacesInput) error + WaitUntilNetworkInterfaceAvailableWithContext(aws.Context, *ec2.DescribeNetworkInterfacesInput, ...request.WaiterOption) error + + WaitUntilPasswordDataAvailable(*ec2.GetPasswordDataInput) error + WaitUntilPasswordDataAvailableWithContext(aws.Context, *ec2.GetPasswordDataInput, ...request.WaiterOption) error + + WaitUntilSecurityGroupExists(*ec2.DescribeSecurityGroupsInput) error + WaitUntilSecurityGroupExistsWithContext(aws.Context, *ec2.DescribeSecurityGroupsInput, ...request.WaiterOption) error + + WaitUntilSnapshotCompleted(*ec2.DescribeSnapshotsInput) error + WaitUntilSnapshotCompletedWithContext(aws.Context, *ec2.DescribeSnapshotsInput, ...request.WaiterOption) error + + WaitUntilSnapshotImported(*ec2.DescribeImportSnapshotTasksInput) error + WaitUntilSnapshotImportedWithContext(aws.Context, *ec2.DescribeImportSnapshotTasksInput, ...request.WaiterOption) error + + WaitUntilSpotInstanceRequestFulfilled(*ec2.DescribeSpotInstanceRequestsInput) error + WaitUntilSpotInstanceRequestFulfilledWithContext(aws.Context, *ec2.DescribeSpotInstanceRequestsInput, ...request.WaiterOption) error + + WaitUntilStoreImageTaskComplete(*ec2.DescribeStoreImageTasksInput) error + WaitUntilStoreImageTaskCompleteWithContext(aws.Context, *ec2.DescribeStoreImageTasksInput, ...request.WaiterOption) error + + WaitUntilSubnetAvailable(*ec2.DescribeSubnetsInput) error + WaitUntilSubnetAvailableWithContext(aws.Context, *ec2.DescribeSubnetsInput, ...request.WaiterOption) error + + WaitUntilSystemStatusOk(*ec2.DescribeInstanceStatusInput) error + WaitUntilSystemStatusOkWithContext(aws.Context, *ec2.DescribeInstanceStatusInput, ...request.WaiterOption) error + + WaitUntilVolumeAvailable(*ec2.DescribeVolumesInput) error + WaitUntilVolumeAvailableWithContext(aws.Context, *ec2.DescribeVolumesInput, ...request.WaiterOption) error + + WaitUntilVolumeDeleted(*ec2.DescribeVolumesInput) error + WaitUntilVolumeDeletedWithContext(aws.Context, *ec2.DescribeVolumesInput, ...request.WaiterOption) error + + WaitUntilVolumeInUse(*ec2.DescribeVolumesInput) error + WaitUntilVolumeInUseWithContext(aws.Context, *ec2.DescribeVolumesInput, ...request.WaiterOption) 
error + + WaitUntilVpcAvailable(*ec2.DescribeVpcsInput) error + WaitUntilVpcAvailableWithContext(aws.Context, *ec2.DescribeVpcsInput, ...request.WaiterOption) error + + WaitUntilVpcExists(*ec2.DescribeVpcsInput) error + WaitUntilVpcExistsWithContext(aws.Context, *ec2.DescribeVpcsInput, ...request.WaiterOption) error + + WaitUntilVpcPeeringConnectionDeleted(*ec2.DescribeVpcPeeringConnectionsInput) error + WaitUntilVpcPeeringConnectionDeletedWithContext(aws.Context, *ec2.DescribeVpcPeeringConnectionsInput, ...request.WaiterOption) error + + WaitUntilVpcPeeringConnectionExists(*ec2.DescribeVpcPeeringConnectionsInput) error + WaitUntilVpcPeeringConnectionExistsWithContext(aws.Context, *ec2.DescribeVpcPeeringConnectionsInput, ...request.WaiterOption) error + + WaitUntilVpnConnectionAvailable(*ec2.DescribeVpnConnectionsInput) error + WaitUntilVpnConnectionAvailableWithContext(aws.Context, *ec2.DescribeVpnConnectionsInput, ...request.WaiterOption) error + + WaitUntilVpnConnectionDeleted(*ec2.DescribeVpnConnectionsInput) error + WaitUntilVpnConnectionDeletedWithContext(aws.Context, *ec2.DescribeVpnConnectionsInput, ...request.WaiterOption) error +} + +var _ EC2API = (*ec2.EC2)(nil) diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go index 7d3e1536b36..0281b3ee584 100644 --- a/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: udpa/annotations/migrate.proto package annotations diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go index 71957789538..cf858bd9773 100644 --- a/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: udpa/annotations/security.proto package annotations diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go index 8631b8568c1..2d5c78dc29a 100644 --- a/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: udpa/annotations/sensitive.proto package annotations diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go index f2fdc3ca388..c96818b17cd 100644 --- a/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: udpa/annotations/status.proto package annotations diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go index df83e0a2eb5..b3ab9e346b0 100644 --- a/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: udpa/annotations/versioning.proto package annotations diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go index ad24b1f7f6c..705a71e8873 100644 --- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/annotations/v3/migrate.proto package v3 diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go index 61df6890bd3..0278e516589 100644 --- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/annotations/v3/security.proto package v3 diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go index 274eace058d..57161aab476 100644 --- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/annotations/v3/sensitive.proto package v3 diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go index 2497e0b2fea..255d109fc51 100644 --- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/annotations/v3/status.proto package v3 diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go index 2307dc874a4..2de032f159c 100644 --- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/annotations/v3/versioning.proto package v3 diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go index 3c361216c0d..3058286d575 100644 --- a/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/core/v3/authority.proto package v3 diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go index d7be5c4d27f..0e339b5899d 100644 --- a/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/core/v3/cidr.proto package v3 diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go index 52b520af4ea..0d45b961bf2 100644 --- a/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/core/v3/collection_entry.proto package v3 diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go index 563775a1fb5..714ab436734 100644 --- a/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/core/v3/context_params.proto package v3 diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go index 476fa47c23e..be4ea10c6b2 100644 --- a/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/core/v3/extension.proto package v3 diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go index 9402230d565..641e3411ac3 100644 --- a/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/core/v3/resource.proto package v3 diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go index 50fe599dbfe..3f99d4beeca 100644 --- a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/core/v3/resource_locator.proto package v3 diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go index 92d5fa85395..3d42818b7a3 100644 --- a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v3.21.5 +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 // source: xds/core/v3/resource_name.proto package v3 diff --git a/vendor/github.com/digitalocean/godo/CHANGELOG.md b/vendor/github.com/digitalocean/godo/CHANGELOG.md index ffbccf5b688..37127216971 100644 --- a/vendor/github.com/digitalocean/godo/CHANGELOG.md +++ b/vendor/github.com/digitalocean/godo/CHANGELOG.md @@ -1,5 +1,24 @@ # Change Log +## [v1.126.0] - 2024-09-25 + +- #732 - @gottwald - DOKS: add custom CIDR fields +- #727 - @loosla - [databases]: add support for Kafka advanced configuration + +## [v1.125.0] - 2024-09-17 + +- #726 - @loosla - [databases]: add support for MongoDB advanced configuration +- #724 - @andrewsomething - Bump go version to 1.22 +- #723 - @jauderho - Update Go dependencies and remove replace statements + +## [v1.124.0] - 2024-09-10 + +- #721 - @vsharma6855 - [DBAAS] | Add API endpoint for applying cluster patches + +## [v1.123.0] - 2024-09-06 + +- #719 - @andrewsomething - apps: mark ListTiers and GetTier as deprecated + ## [v1.122.0] - 2024-09-04 - #717 - @danaelhe - DB: Fix Logsink Attribute Types diff --git a/vendor/github.com/digitalocean/godo/apps.go b/vendor/github.com/digitalocean/godo/apps.go index ebf341c0429..ac792658e20 100644 --- a/vendor/github.com/digitalocean/godo/apps.go +++ b/vendor/github.com/digitalocean/godo/apps.go @@ -384,6 +384,9 @@ func (s *AppsServiceOp) ListRegions(ctx context.Context) ([]*AppRegion, *Respons } // ListTiers lists available app tiers. +// +// Deprecated: The '/v2/apps/tiers' endpoint has been deprecated as app tiers +// are no longer tied to instance sizes. The concept of tiers is being retired. func (s *AppsServiceOp) ListTiers(ctx context.Context) ([]*AppTier, *Response, error) { path := fmt.Sprintf("%s/tiers", appsBasePath) req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) @@ -399,6 +402,9 @@ func (s *AppsServiceOp) ListTiers(ctx context.Context) ([]*AppTier, *Response, e } // GetTier retrieves information about a specific app tier. +// +// Deprecated: The '/v2/apps/tiers/{slug}' endpoints have been deprecated as app +// tiers are no longer tied to instance sizes. The concept of tiers is being retired. 
func (s *AppsServiceOp) GetTier(ctx context.Context, slug string) (*AppTier, *Response, error) { path := fmt.Sprintf("%s/tiers/%s", appsBasePath, slug) req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) diff --git a/vendor/github.com/digitalocean/godo/databases.go b/vendor/github.com/digitalocean/godo/databases.go index 3b6869cfef9..e168186ff47 100644 --- a/vendor/github.com/digitalocean/godo/databases.go +++ b/vendor/github.com/digitalocean/godo/databases.go @@ -3,6 +3,7 @@ package godo import ( "context" "fmt" + "math/big" "net/http" "strings" "time" @@ -16,6 +17,7 @@ const ( databaseResizePath = databaseBasePath + "/%s/resize" databaseMigratePath = databaseBasePath + "/%s/migrate" databaseMaintenancePath = databaseBasePath + "/%s/maintenance" + databaseUpdateInstallationPath = databaseBasePath + "/%s/install_update" databaseBackupsPath = databaseBasePath + "/%s/backups" databaseUsersPath = databaseBasePath + "/%s/users" databaseUserPath = databaseBasePath + "/%s/users/%s" @@ -120,6 +122,7 @@ type DatabasesService interface { Resize(context.Context, string, *DatabaseResizeRequest) (*Response, error) Migrate(context.Context, string, *DatabaseMigrateRequest) (*Response, error) UpdateMaintenance(context.Context, string, *DatabaseUpdateMaintenanceRequest) (*Response, error) + InstallUpdate(context.Context, string) (*Response, error) ListBackups(context.Context, string, *ListOptions) ([]DatabaseBackup, *Response, error) GetUser(context.Context, string, string) (*DatabaseUser, *Response, error) ListUsers(context.Context, string, *ListOptions) ([]DatabaseUser, *Response, error) @@ -150,9 +153,13 @@ type DatabasesService interface { GetPostgreSQLConfig(context.Context, string) (*PostgreSQLConfig, *Response, error) GetRedisConfig(context.Context, string) (*RedisConfig, *Response, error) GetMySQLConfig(context.Context, string) (*MySQLConfig, *Response, error) + GetMongoDBConfig(context.Context, string) (*MongoDBConfig, *Response, error) + GetKafkaConfig(context.Context, string) (*KafkaConfig, *Response, error) UpdatePostgreSQLConfig(context.Context, string, *PostgreSQLConfig) (*Response, error) UpdateRedisConfig(context.Context, string, *RedisConfig) (*Response, error) UpdateMySQLConfig(context.Context, string, *MySQLConfig) (*Response, error) + UpdateMongoDBConfig(context.Context, string, *MongoDBConfig) (*Response, error) + UpdateKafkaConfig(context.Context, string, *KafkaConfig) (*Response, error) ListOptions(todo context.Context) (*DatabaseOptions, *Response, error) UpgradeMajorVersion(context.Context, string, *UpgradeVersionRequest) (*Response, error) ListTopics(context.Context, string, *ListOptions) ([]DatabaseTopic, *Response, error) @@ -646,6 +653,36 @@ type MySQLConfig struct { BinlogRetentionPeriod *int `json:"binlog_retention_period,omitempty"` } +// MongoDBConfig holds advanced configurations for MongoDB database clusters. +type MongoDBConfig struct { + DefaultReadConcern *string `json:"default_read_concern,omitempty"` + DefaultWriteConcern *string `json:"default_write_concern,omitempty"` + TransactionLifetimeLimitSeconds *int `json:"transaction_lifetime_limit_seconds,omitempty"` + SlowOpThresholdMs *int `json:"slow_op_threshold_ms,omitempty"` + Verbosity *int `json:"verbosity,omitempty"` +} + +// KafkaConfig holds advanced configurations for Kafka database clusters. 
+type KafkaConfig struct { + GroupInitialRebalanceDelayMs *int `json:"group_initial_rebalance_delay_ms,omitempty"` + GroupMinSessionTimeoutMs *int `json:"group_min_session_timeout_ms,omitempty"` + GroupMaxSessionTimeoutMs *int `json:"group_max_session_timeout_ms,omitempty"` + MessageMaxBytes *int `json:"message_max_bytes,omitempty"` + LogCleanerDeleteRetentionMs *int64 `json:"log_cleaner_delete_retention_ms,omitempty"` + LogCleanerMinCompactionLagMs *uint64 `json:"log_cleaner_min_compaction_lag_ms,omitempty"` + LogFlushIntervalMs *uint64 `json:"log_flush_interval_ms,omitempty"` + LogIndexIntervalBytes *int `json:"log_index_interval_bytes,omitempty"` + LogMessageDownconversionEnable *bool `json:"log_message_downconversion_enable,omitempty"` + LogMessageTimestampDifferenceMaxMs *uint64 `json:"log_message_timestamp_difference_max_ms,omitempty"` + LogPreallocate *bool `json:"log_preallocate,omitempty"` + LogRetentionBytes *big.Int `json:"log_retention_bytes,omitempty"` + LogRetentionHours *int `json:"log_retention_hours,omitempty"` + LogRetentionMs *big.Int `json:"log_retention_ms,omitempty"` + LogRollJitterMs *uint64 `json:"log_roll_jitter_ms,omitempty"` + LogSegmentDeleteDelayMs *int `json:"log_segment_delete_delay_ms,omitempty"` + AutoCreateTopicsEnable *bool `json:"auto_create_topics_enable,omitempty"` +} + type databaseUserRoot struct { User *DatabaseUser `json:"user"` } @@ -686,6 +723,14 @@ type databaseMySQLConfigRoot struct { Config *MySQLConfig `json:"config"` } +type databaseMongoDBConfigRoot struct { + Config *MongoDBConfig `json:"config"` +} + +type databaseKafkaConfigRoot struct { + Config *KafkaConfig `json:"config"` +} + type databaseBackupsRoot struct { Backups []DatabaseBackup `json:"backups"` } @@ -940,6 +985,20 @@ func (svc *DatabasesServiceOp) UpdateMaintenance(ctx context.Context, databaseID return resp, nil } +// InstallUpdate starts installation of updates +func (svc *DatabasesServiceOp) InstallUpdate(ctx context.Context, databaseID string) (*Response, error) { + path := fmt.Sprintf(databaseUpdateInstallationPath, databaseID) + req, err := svc.client.NewRequest(ctx, http.MethodPut, path, nil) + if err != nil { + return nil, err + } + resp, err := svc.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + // ListBackups returns a list of the current backups of a database func (svc *DatabasesServiceOp) ListBackups(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseBackup, *Response, error) { path := fmt.Sprintf(databaseBackupsPath, databaseID) @@ -1483,6 +1542,70 @@ func (svc *DatabasesServiceOp) UpdateMySQLConfig(ctx context.Context, databaseID return resp, nil } +// GetMongoDBConfig retrieves the config for a MongoDB database cluster. +func (svc *DatabasesServiceOp) GetMongoDBConfig(ctx context.Context, databaseID string) (*MongoDBConfig, *Response, error) { + path := fmt.Sprintf(databaseConfigPath, databaseID) + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(databaseMongoDBConfigRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Config, resp, nil +} + +// UpdateMongoDBConfig updates the config for a MongoDB database cluster. 
+func (svc *DatabasesServiceOp) UpdateMongoDBConfig(ctx context.Context, databaseID string, config *MongoDBConfig) (*Response, error) { + path := fmt.Sprintf(databaseConfigPath, databaseID) + root := &databaseMongoDBConfigRoot{ + Config: config, + } + req, err := svc.client.NewRequest(ctx, http.MethodPatch, path, root) + if err != nil { + return nil, err + } + resp, err := svc.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + +// GetKafkaConfig retrieves the config for a Kafka database cluster. +func (svc *DatabasesServiceOp) GetKafkaConfig(ctx context.Context, databaseID string) (*KafkaConfig, *Response, error) { + path := fmt.Sprintf(databaseConfigPath, databaseID) + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(databaseKafkaConfigRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Config, resp, nil +} + +// UpdateKafkaConfig updates the config for a Kafka database cluster. +func (svc *DatabasesServiceOp) UpdateKafkaConfig(ctx context.Context, databaseID string, config *KafkaConfig) (*Response, error) { + path := fmt.Sprintf(databaseConfigPath, databaseID) + root := &databaseKafkaConfigRoot{ + Config: config, + } + req, err := svc.client.NewRequest(ctx, http.MethodPatch, path, root) + if err != nil { + return nil, err + } + resp, err := svc.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + // ListOptions gets the database options available. func (svc *DatabasesServiceOp) ListOptions(ctx context.Context) (*DatabaseOptions, *Response, error) { root := new(databaseOptionsRoot) diff --git a/vendor/github.com/digitalocean/godo/godo.go b/vendor/github.com/digitalocean/godo/godo.go index 8bc4a098ba6..3702ac1f72f 100644 --- a/vendor/github.com/digitalocean/godo/godo.go +++ b/vendor/github.com/digitalocean/godo/godo.go @@ -21,7 +21,7 @@ import ( ) const ( - libraryVersion = "1.122.0" + libraryVersion = "1.126.0" defaultBaseURL = "https://api.digitalocean.com/" userAgent = "godo/" + libraryVersion mediaType = "application/json" diff --git a/vendor/github.com/digitalocean/godo/kubernetes.go b/vendor/github.com/digitalocean/godo/kubernetes.go index 8ef9d241e2a..9b3bcfa1a63 100644 --- a/vendor/github.com/digitalocean/godo/kubernetes.go +++ b/vendor/github.com/digitalocean/godo/kubernetes.go @@ -65,11 +65,13 @@ type KubernetesServiceOp struct { // KubernetesClusterCreateRequest represents a request to create a Kubernetes cluster. 
type KubernetesClusterCreateRequest struct { - Name string `json:"name,omitempty"` - RegionSlug string `json:"region,omitempty"` - VersionSlug string `json:"version,omitempty"` - Tags []string `json:"tags,omitempty"` - VPCUUID string `json:"vpc_uuid,omitempty"` + Name string `json:"name,omitempty"` + RegionSlug string `json:"region,omitempty"` + VersionSlug string `json:"version,omitempty"` + Tags []string `json:"tags,omitempty"` + VPCUUID string `json:"vpc_uuid,omitempty"` + ClusterSubnet string `json:"cluster_subnet,omitempty"` + ServiceSubnet string `json:"service_subnet,omitempty"` // Create cluster with highly available control plane HA bool `json:"ha"` diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index 86b01839b98..7164e1eba53 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -393,7 +393,7 @@ definitions: Make the mount non-recursively read-only, but still leave the mount recursive (unless NonRecursive is set to `true` in conjunction). - Addded in v1.44, before that version all read-only mounts were + Added in v1.44, before that version all read-only mounts were non-recursive by default. To match the previous behaviour this will default to `true` for clients on versions prior to v1.44. type: "boolean" @@ -1384,7 +1384,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.47. + > always empty. It must not be used, and will be removed in API v1.48. type: "string" example: "" Domainname: @@ -1394,7 +1394,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.47. + > always empty. It must not be used, and will be removed in API v1.48. type: "string" example: "" User: @@ -1408,7 +1408,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.47. + > always false. It must not be used, and will be removed in API v1.48. type: "boolean" default: false example: false @@ -1419,7 +1419,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.47. + > always false. It must not be used, and will be removed in API v1.48. type: "boolean" default: false example: false @@ -1430,7 +1430,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.47. + > always false. It must not be used, and will be removed in API v1.48. type: "boolean" default: false example: false @@ -1457,7 +1457,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.47. + > always false. It must not be used, and will be removed in API v1.48. type: "boolean" default: false example: false @@ -1468,7 +1468,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.47. + > always false. It must not be used, and will be removed in API v1.48. type: "boolean" default: false example: false @@ -1479,7 +1479,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.47. + > always false. It must not be used, and will be removed in API v1.48. type: "boolean" default: false example: false @@ -1516,7 +1516,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.47. + > always empty. It must not be used, and will be removed in API v1.48. type: "string" default: "" example: "" @@ -1555,7 +1555,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.47. + > always omitted. It must not be used, and will be removed in API v1.48. type: "boolean" default: false example: false @@ -1567,7 +1567,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.47. + > always omitted. It must not be used, and will be removed in API v1.48. type: "string" default: "" example: "" @@ -1601,7 +1601,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.47. + > always omitted. It must not be used, and will be removed in API v1.48. type: "integer" default: 10 x-nullable: true @@ -2216,7 +2216,7 @@ definitions: Created: description: | Date and time at which the image was created as a Unix timestamp - (number of seconds sinds EPOCH). + (number of seconds since EPOCH). type: "integer" x-nullable: false example: "1644009612" @@ -2513,7 +2513,7 @@ definitions: example: false Attachable: description: | - Wheter a global / swarm scope network is manually attachable by regular + Whether a global / swarm scope network is manually attachable by regular containers from workers in swarm mode. type: "boolean" default: false @@ -3736,7 +3736,7 @@ definitions: example: "json-file" Options: description: | - Driver-specific options for the selectd log driver, specified + Driver-specific options for the selected log driver, specified as key/value pairs. type: "object" additionalProperties: @@ -5347,7 +5347,7 @@ definitions: The version Go used to compile the daemon, and the version of the Go runtime in use. type: "string" - example: "go1.21.13" + example: "go1.22.7" Os: description: | The operating system that the daemon is running on ("linux" or "windows") @@ -7712,7 +7712,7 @@ paths: * Memory usage % = `(used_memory / available_memory) * 100.0` * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` - * number_cpus = `lenght(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` + * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` operationId: "ContainerStats" produces: ["application/json"] @@ -9226,12 +9226,23 @@ paths: parameters: - name: "name" in: "path" - description: "Image name or ID." + description: | + Name of the image to push. For example, `registry.example.com/myimage`. + The image must be present in the local image store with the same name. + + The name should be provided without tag; if a tag is provided, it + is ignored. For example, `registry.example.com/myimage:latest` is + considered equivalent to `registry.example.com/myimage`. + + Use the `tag` parameter to specify the tag to push. type: "string" required: true - name: "tag" in: "query" - description: "The tag to associate with the image on the registry." + description: | + Tag of the image to push. For example, `latest`. If no tag is provided, + all tags of the given image that are present in the local image store + are pushed. 
type: "string" - name: "X-Registry-Auth" in: "header" diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig.go b/vendor/github.com/docker/docker/api/types/container/hostconfig.go index 727da8839cc..03648fb7b5d 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig.go @@ -1,6 +1,7 @@ package container // import "github.com/docker/docker/api/types/container" import ( + "errors" "fmt" "strings" @@ -325,12 +326,12 @@ func ValidateRestartPolicy(policy RestartPolicy) error { if policy.MaximumRetryCount < 0 { msg += " and cannot be negative" } - return &errInvalidParameter{fmt.Errorf(msg)} + return &errInvalidParameter{errors.New(msg)} } return nil case RestartPolicyOnFailure: if policy.MaximumRetryCount < 0 { - return &errInvalidParameter{fmt.Errorf("invalid restart policy: maximum retry count cannot be negative")} + return &errInvalidParameter{errors.New("invalid restart policy: maximum retry count cannot be negative")} } return nil case "": diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go index 0c39ab5f18b..0914b2a4410 100644 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -196,7 +196,7 @@ func (args Args) Match(field, source string) bool { } // GetBoolOrDefault returns a boolean value of the key if the key is present -// and is intepretable as a boolean value. Otherwise the default value is returned. +// and is interpretable as a boolean value. Otherwise the default value is returned. // Error is not nil only if the filter values are not valid boolean or are conflicting. func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) { fieldValues, ok := args.fields[key] diff --git a/vendor/github.com/docker/docker/api/types/image/summary.go b/vendor/github.com/docker/docker/api/types/image/summary.go index c7168fe62ea..e87e216a28b 100644 --- a/vendor/github.com/docker/docker/api/types/image/summary.go +++ b/vendor/github.com/docker/docker/api/types/image/summary.go @@ -12,7 +12,7 @@ type Summary struct { Containers int64 `json:"Containers"` // Date and time at which the image was created as a Unix timestamp - // (number of seconds sinds EPOCH). + // (number of seconds since EPOCH). // // Required: true Created int64 `json:"Created"` diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go index 3eae4b9b297..1b4be6fffba 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -122,7 +122,7 @@ type CAConfig struct { SigningCAKey string `json:",omitempty"` // If this value changes, and there is no specified signing cert and key, - // then the swarm is forced to generate a new root certificate ane key. + // then the swarm is forced to generate a new root certificate and key. ForceRotate uint64 `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go index bbd9ff0b8f9..618a4816209 100644 --- a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go +++ b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go @@ -414,7 +414,7 @@ type Info struct { // the Volume has not been successfully created yet. 
VolumeID string `json:",omitempty"` - // AccessibleTopolgoy is the topology this volume is actually accessible + // AccessibleTopology is the topology this volume is actually accessible // from. AccessibleTopology []Topology `json:",omitempty"` } diff --git a/vendor/github.com/go-kit/kit/LICENSE b/vendor/github.com/go-kit/kit/LICENSE deleted file mode 100644 index 9d83342acdc..00000000000 --- a/vendor/github.com/go-kit/kit/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Peter Bourgon - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/go-kit/kit/log/README.md b/vendor/github.com/go-kit/kit/log/README.md deleted file mode 100644 index 5492dd94485..00000000000 --- a/vendor/github.com/go-kit/kit/log/README.md +++ /dev/null @@ -1,160 +0,0 @@ -# package log - -**Deprecation notice:** The core Go kit log packages (log, log/level, log/term, and -log/syslog) have been moved to their own repository at github.com/go-kit/log. -The corresponding packages in this directory remain for backwards compatibility. -Their types alias the types and their functions call the functions provided by -the new repository. Using either import path should be equivalent. Prefer the -new import path when practical. - -______ - -`package log` provides a minimal interface for structured logging in services. -It may be wrapped to encode conventions, enforce type-safety, provide leveled -logging, and so on. It can be used for both typical application log events, -and log-structured data streams. - -## Structured logging - -Structured logging is, basically, conceding to the reality that logs are -_data_, and warrant some level of schematic rigor. Using a stricter, -key/value-oriented message format for our logs, containing contextual and -semantic information, makes it much easier to get insight into the -operational activity of the systems we build. Consequently, `package log` is -of the strong belief that "[the benefits of structured logging outweigh the -minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)". - -Migrating from unstructured to structured logging is probably a lot easier -than you'd expect. 
- -```go -// Unstructured -log.Printf("HTTP server listening on %s", addr) - -// Structured -logger.Log("transport", "HTTP", "addr", addr, "msg", "listening") -``` - -## Usage - -### Typical application logging - -```go -w := log.NewSyncWriter(os.Stderr) -logger := log.NewLogfmtLogger(w) -logger.Log("question", "what is the meaning of life?", "answer", 42) - -// Output: -// question="what is the meaning of life?" answer=42 -``` - -### Contextual Loggers - -```go -func main() { - var logger log.Logger - logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) - logger = log.With(logger, "instance_id", 123) - - logger.Log("msg", "starting") - NewWorker(log.With(logger, "component", "worker")).Run() - NewSlacker(log.With(logger, "component", "slacker")).Run() -} - -// Output: -// instance_id=123 msg=starting -// instance_id=123 component=worker msg=running -// instance_id=123 component=slacker msg=running -``` - -### Interact with stdlib logger - -Redirect stdlib logger to Go kit logger. - -```go -import ( - "os" - stdlog "log" - kitlog "github.com/go-kit/kit/log" -) - -func main() { - logger := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stdout)) - stdlog.SetOutput(kitlog.NewStdlibAdapter(logger)) - stdlog.Print("I sure like pie") -} - -// Output: -// {"msg":"I sure like pie","ts":"2016/01/01 12:34:56"} -``` - -Or, if, for legacy reasons, you need to pipe all of your logging through the -stdlib log package, you can redirect Go kit logger to the stdlib logger. - -```go -logger := kitlog.NewLogfmtLogger(kitlog.StdlibWriter{}) -logger.Log("legacy", true, "msg", "at least it's something") - -// Output: -// 2016/01/01 12:34:56 legacy=true msg="at least it's something" -``` - -### Timestamps and callers - -```go -var logger log.Logger -logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) -logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) - -logger.Log("msg", "hello") - -// Output: -// ts=2016-01-01T12:34:56Z caller=main.go:15 msg=hello -``` - -## Levels - -Log levels are supported via the [level package](https://godoc.org/github.com/go-kit/kit/log/level). - -## Supported output formats - -- [Logfmt](https://brandur.org/logfmt) ([see also](https://blog.codeship.com/logfmt-a-log-format-thats-easy-to-read-and-write)) -- JSON - -## Enhancements - -`package log` is centered on the one-method Logger interface. - -```go -type Logger interface { - Log(keyvals ...interface{}) error -} -``` - -This interface, and its supporting code like is the product of much iteration -and evaluation. For more details on the evolution of the Logger interface, -see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1), -a talk by [Chris Hines](https://github.com/ChrisHines). -Also, please see -[#63](https://github.com/go-kit/kit/issues/63), -[#76](https://github.com/go-kit/kit/pull/76), -[#131](https://github.com/go-kit/kit/issues/131), -[#157](https://github.com/go-kit/kit/pull/157), -[#164](https://github.com/go-kit/kit/issues/164), and -[#252](https://github.com/go-kit/kit/pull/252) -to review historical conversations about package log and the Logger interface. - -Value-add packages and suggestions, -like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/kit/log/level), -are of course welcome. 
Good proposals should - -- Be composable with [contextual loggers](https://godoc.org/github.com/go-kit/kit/log#With), -- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/kit/log#Caller) in any wrapped contextual loggers, and -- Be friendly to packages that accept only an unadorned log.Logger. - -## Benchmarks & comparisons - -There are a few Go logging benchmarks and comparisons that include Go kit's package log. - -- [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) includes kit/log -- [uber-common/zap](https://github.com/uber-common/zap), a zero-alloc logging library, includes a comparison with kit/log diff --git a/vendor/github.com/go-kit/kit/log/doc.go b/vendor/github.com/go-kit/kit/log/doc.go deleted file mode 100644 index c9873f4bcea..00000000000 --- a/vendor/github.com/go-kit/kit/log/doc.go +++ /dev/null @@ -1,118 +0,0 @@ -// Package log provides a structured logger. -// -// Deprecated: Use github.com/go-kit/log instead. -// -// Structured logging produces logs easily consumed later by humans or -// machines. Humans might be interested in debugging errors, or tracing -// specific requests. Machines might be interested in counting interesting -// events, or aggregating information for off-line processing. In both cases, -// it is important that the log messages are structured and actionable. -// Package log is designed to encourage both of these best practices. -// -// Basic Usage -// -// The fundamental interface is Logger. Loggers create log events from -// key/value data. The Logger interface has a single method, Log, which -// accepts a sequence of alternating key/value pairs, which this package names -// keyvals. -// -// type Logger interface { -// Log(keyvals ...interface{}) error -// } -// -// Here is an example of a function using a Logger to create log events. -// -// func RunTask(task Task, logger log.Logger) string { -// logger.Log("taskID", task.ID, "event", "starting task") -// ... -// logger.Log("taskID", task.ID, "event", "task complete") -// } -// -// The keys in the above example are "taskID" and "event". The values are -// task.ID, "starting task", and "task complete". Every key is followed -// immediately by its value. -// -// Keys are usually plain strings. Values may be any type that has a sensible -// encoding in the chosen log format. With structured logging it is a good -// idea to log simple values without formatting them. This practice allows -// the chosen logger to encode values in the most appropriate way. -// -// Contextual Loggers -// -// A contextual logger stores keyvals that it includes in all log events. -// Building appropriate contextual loggers reduces repetition and aids -// consistency in the resulting log output. With, WithPrefix, and WithSuffix -// add context to a logger. We can use With to improve the RunTask example. -// -// func RunTask(task Task, logger log.Logger) string { -// logger = log.With(logger, "taskID", task.ID) -// logger.Log("event", "starting task") -// ... -// taskHelper(task.Cmd, logger) -// ... -// logger.Log("event", "task complete") -// } -// -// The improved version emits the same log events as the original for the -// first and last calls to Log. Passing the contextual logger to taskHelper -// enables each log event created by taskHelper to include the task.ID even -// though taskHelper does not have access to that value. Using contextual -// loggers this way simplifies producing log output that enables tracing the -// life cycle of individual tasks. 
(See the Contextual example for the full -// code of the above snippet.) -// -// Dynamic Contextual Values -// -// A Valuer function stored in a contextual logger generates a new value each -// time an event is logged. The Valuer example demonstrates how this feature -// works. -// -// Valuers provide the basis for consistently logging timestamps and source -// code location. The log package defines several valuers for that purpose. -// See Timestamp, DefaultTimestamp, DefaultTimestampUTC, Caller, and -// DefaultCaller. A common logger initialization sequence that ensures all log -// entries contain a timestamp and source location looks like this: -// -// logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) -// logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) -// -// Concurrent Safety -// -// Applications with multiple goroutines want each log event written to the -// same logger to remain separate from other log events. Package log provides -// two simple solutions for concurrent safe logging. -// -// NewSyncWriter wraps an io.Writer and serializes each call to its Write -// method. Using a SyncWriter has the benefit that the smallest practical -// portion of the logging logic is performed within a mutex, but it requires -// the formatting Logger to make only one call to Write per log event. -// -// NewSyncLogger wraps any Logger and serializes each call to its Log method. -// Using a SyncLogger has the benefit that it guarantees each log event is -// handled atomically within the wrapped logger, but it typically serializes -// both the formatting and output logic. Use a SyncLogger if the formatting -// logger may perform multiple writes per log event. -// -// Error Handling -// -// This package relies on the practice of wrapping or decorating loggers with -// other loggers to provide composable pieces of functionality. It also means -// that Logger.Log must return an error because some -// implementations—especially those that output log data to an io.Writer—may -// encounter errors that cannot be handled locally. This in turn means that -// Loggers that wrap other loggers should return errors from the wrapped -// logger up the stack. -// -// Fortunately, the decorator pattern also provides a way to avoid the -// necessity to check for errors every time an application calls Logger.Log. -// An application required to panic whenever its Logger encounters -// an error could initialize its logger as follows. -// -// fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) -// logger := log.LoggerFunc(func(keyvals ...interface{}) error { -// if err := fmtlogger.Log(keyvals...); err != nil { -// panic(err) -// } -// return nil -// }) -package log diff --git a/vendor/github.com/go-kit/kit/log/json_logger.go b/vendor/github.com/go-kit/kit/log/json_logger.go deleted file mode 100644 index edfde2f4632..00000000000 --- a/vendor/github.com/go-kit/kit/log/json_logger.go +++ /dev/null @@ -1,15 +0,0 @@ -package log - -import ( - "io" - - "github.com/go-kit/log" -) - -// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a -// single JSON object. Each log event produces no more than one call to -// w.Write. The passed Writer must be safe for concurrent use by multiple -// goroutines if the returned Logger will be used concurrently. 
-func NewJSONLogger(w io.Writer) Logger { - return log.NewJSONLogger(w) -} diff --git a/vendor/github.com/go-kit/kit/log/level/doc.go b/vendor/github.com/go-kit/kit/log/level/doc.go deleted file mode 100644 index 7baf8708a3f..00000000000 --- a/vendor/github.com/go-kit/kit/log/level/doc.go +++ /dev/null @@ -1,25 +0,0 @@ -// Package level implements leveled logging on top of Go kit's log package. -// -// Deprecated: Use github.com/go-kit/log/level instead. -// -// To use the level package, create a logger as per normal in your func main, -// and wrap it with level.NewFilter. -// -// var logger log.Logger -// logger = log.NewLogfmtLogger(os.Stderr) -// logger = level.NewFilter(logger, level.AllowInfo()) // <-- -// logger = log.With(logger, "ts", log.DefaultTimestampUTC) -// -// Then, at the callsites, use one of the level.Debug, Info, Warn, or Error -// helper methods to emit leveled log events. -// -// logger.Log("foo", "bar") // as normal, no level -// level.Debug(logger).Log("request_id", reqID, "trace_data", trace.Get()) -// if value > 100 { -// level.Error(logger).Log("value", value) -// } -// -// NewFilter allows precise control over what happens when a log event is -// emitted without a level key, or if a squelched level is used. Check the -// Option functions for details. -package level diff --git a/vendor/github.com/go-kit/kit/log/level/level.go b/vendor/github.com/go-kit/kit/log/level/level.go deleted file mode 100644 index 803e8b96cd0..00000000000 --- a/vendor/github.com/go-kit/kit/log/level/level.go +++ /dev/null @@ -1,120 +0,0 @@ -package level - -import ( - "github.com/go-kit/log" - "github.com/go-kit/log/level" -) - -// Error returns a logger that includes a Key/ErrorValue pair. -func Error(logger log.Logger) log.Logger { - return level.Error(logger) -} - -// Warn returns a logger that includes a Key/WarnValue pair. -func Warn(logger log.Logger) log.Logger { - return level.Warn(logger) -} - -// Info returns a logger that includes a Key/InfoValue pair. -func Info(logger log.Logger) log.Logger { - return level.Info(logger) -} - -// Debug returns a logger that includes a Key/DebugValue pair. -func Debug(logger log.Logger) log.Logger { - return level.Debug(logger) -} - -// NewFilter wraps next and implements level filtering. See the commentary on -// the Option functions for a detailed description of how to configure levels. -// If no options are provided, all leveled log events created with Debug, -// Info, Warn or Error helper methods are squelched and non-leveled log -// events are passed to next unmodified. -func NewFilter(next log.Logger, options ...Option) log.Logger { - return level.NewFilter(next, options...) -} - -// Option sets a parameter for the leveled logger. -type Option = level.Option - -// AllowAll is an alias for AllowDebug. -func AllowAll() Option { - return level.AllowAll() -} - -// AllowDebug allows error, warn, info and debug level log events to pass. -func AllowDebug() Option { - return level.AllowDebug() -} - -// AllowInfo allows error, warn and info level log events to pass. -func AllowInfo() Option { - return level.AllowInfo() -} - -// AllowWarn allows error and warn level log events to pass. -func AllowWarn() Option { - return level.AllowWarn() -} - -// AllowError allows only error level log events to pass. -func AllowError() Option { - return level.AllowError() -} - -// AllowNone allows no leveled log events to pass. 
-func AllowNone() Option { - return level.AllowNone() -} - -// ErrNotAllowed sets the error to return from Log when it squelches a log -// event disallowed by the configured Allow[Level] option. By default, -// ErrNotAllowed is nil; in this case the log event is squelched with no -// error. -func ErrNotAllowed(err error) Option { - return level.ErrNotAllowed(err) -} - -// SquelchNoLevel instructs Log to squelch log events with no level, so that -// they don't proceed through to the wrapped logger. If SquelchNoLevel is set -// to true and a log event is squelched in this way, the error value -// configured with ErrNoLevel is returned to the caller. -func SquelchNoLevel(squelch bool) Option { - return level.SquelchNoLevel(squelch) -} - -// ErrNoLevel sets the error to return from Log when it squelches a log event -// with no level. By default, ErrNoLevel is nil; in this case the log event is -// squelched with no error. -func ErrNoLevel(err error) Option { - return level.ErrNoLevel(err) -} - -// NewInjector wraps next and returns a logger that adds a Key/level pair to -// the beginning of log events that don't already contain a level. In effect, -// this gives a default level to logs without a level. -func NewInjector(next log.Logger, lvl Value) log.Logger { - return level.NewInjector(next, lvl) -} - -// Value is the interface that each of the canonical level values implement. -// It contains unexported methods that prevent types from other packages from -// implementing it and guaranteeing that NewFilter can distinguish the levels -// defined in this package from all other values. -type Value = level.Value - -// Key returns the unique key added to log events by the loggers in this -// package. -func Key() interface{} { return level.Key() } - -// ErrorValue returns the unique value added to log events by Error. -func ErrorValue() Value { return level.ErrorValue() } - -// WarnValue returns the unique value added to log events by Warn. -func WarnValue() Value { return level.WarnValue() } - -// InfoValue returns the unique value added to log events by Info. -func InfoValue() Value { return level.InfoValue() } - -// DebugValue returns the unique value added to log events by Debug. -func DebugValue() Value { return level.DebugValue() } diff --git a/vendor/github.com/go-kit/kit/log/log.go b/vendor/github.com/go-kit/kit/log/log.go deleted file mode 100644 index 164a4f94a84..00000000000 --- a/vendor/github.com/go-kit/kit/log/log.go +++ /dev/null @@ -1,51 +0,0 @@ -package log - -import ( - "github.com/go-kit/log" -) - -// Logger is the fundamental interface for all log operations. Log creates a -// log event from keyvals, a variadic sequence of alternating keys and values. -// Implementations must be safe for concurrent use by multiple goroutines. In -// particular, any implementation of Logger that appends to keyvals or -// modifies or retains any of its elements must make a copy first. -type Logger = log.Logger - -// ErrMissingValue is appended to keyvals slices with odd length to substitute -// the missing value. -var ErrMissingValue = log.ErrMissingValue - -// With returns a new contextual logger with keyvals prepended to those passed -// to calls to Log. If logger is also a contextual logger created by With, -// WithPrefix, or WithSuffix, keyvals is appended to the existing context. -// -// The returned Logger replaces all value elements (odd indexes) containing a -// Valuer with their generated value for each call to its Log method. 
-func With(logger Logger, keyvals ...interface{}) Logger { - return log.With(logger, keyvals...) -} - -// WithPrefix returns a new contextual logger with keyvals prepended to those -// passed to calls to Log. If logger is also a contextual logger created by -// With, WithPrefix, or WithSuffix, keyvals is prepended to the existing context. -// -// The returned Logger replaces all value elements (odd indexes) containing a -// Valuer with their generated value for each call to its Log method. -func WithPrefix(logger Logger, keyvals ...interface{}) Logger { - return log.WithPrefix(logger, keyvals...) -} - -// WithSuffix returns a new contextual logger with keyvals appended to those -// passed to calls to Log. If logger is also a contextual logger created by -// With, WithPrefix, or WithSuffix, keyvals is appended to the existing context. -// -// The returned Logger replaces all value elements (odd indexes) containing a -// Valuer with their generated value for each call to its Log method. -func WithSuffix(logger Logger, keyvals ...interface{}) Logger { - return log.WithSuffix(logger, keyvals...) -} - -// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If -// f is a function with the appropriate signature, LoggerFunc(f) is a Logger -// object that calls f. -type LoggerFunc = log.LoggerFunc diff --git a/vendor/github.com/go-kit/kit/log/logfmt_logger.go b/vendor/github.com/go-kit/kit/log/logfmt_logger.go deleted file mode 100644 index 51cde2c566f..00000000000 --- a/vendor/github.com/go-kit/kit/log/logfmt_logger.go +++ /dev/null @@ -1,15 +0,0 @@ -package log - -import ( - "io" - - "github.com/go-kit/log" -) - -// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in -// logfmt format. Each log event produces no more than one call to w.Write. -// The passed Writer must be safe for concurrent use by multiple goroutines if -// the returned Logger will be used concurrently. -func NewLogfmtLogger(w io.Writer) Logger { - return log.NewLogfmtLogger(w) -} diff --git a/vendor/github.com/go-kit/kit/log/nop_logger.go b/vendor/github.com/go-kit/kit/log/nop_logger.go deleted file mode 100644 index b02c686064d..00000000000 --- a/vendor/github.com/go-kit/kit/log/nop_logger.go +++ /dev/null @@ -1,8 +0,0 @@ -package log - -import "github.com/go-kit/log" - -// NewNopLogger returns a logger that doesn't do anything. -func NewNopLogger() Logger { - return log.NewNopLogger() -} diff --git a/vendor/github.com/go-kit/kit/log/stdlib.go b/vendor/github.com/go-kit/kit/log/stdlib.go deleted file mode 100644 index cb604a7a85a..00000000000 --- a/vendor/github.com/go-kit/kit/log/stdlib.go +++ /dev/null @@ -1,54 +0,0 @@ -package log - -import ( - "io" - - "github.com/go-kit/log" -) - -// StdlibWriter implements io.Writer by invoking the stdlib log.Print. It's -// designed to be passed to a Go kit logger as the writer, for cases where -// it's necessary to redirect all Go kit log output to the stdlib logger. -// -// If you have any choice in the matter, you shouldn't use this. Prefer to -// redirect the stdlib log to the Go kit logger via NewStdlibAdapter. -type StdlibWriter = log.StdlibWriter - -// StdlibAdapter wraps a Logger and allows it to be passed to the stdlib -// logger's SetOutput. It will extract date/timestamps, filenames, and -// messages, and place them under relevant keys. -type StdlibAdapter = log.StdlibAdapter - -// StdlibAdapterOption sets a parameter for the StdlibAdapter. 
-type StdlibAdapterOption = log.StdlibAdapterOption - -// TimestampKey sets the key for the timestamp field. By default, it's "ts". -func TimestampKey(key string) StdlibAdapterOption { - return log.TimestampKey(key) -} - -// FileKey sets the key for the file and line field. By default, it's "caller". -func FileKey(key string) StdlibAdapterOption { - return log.FileKey(key) -} - -// MessageKey sets the key for the actual log message. By default, it's "msg". -func MessageKey(key string) StdlibAdapterOption { - return log.MessageKey(key) -} - -// Prefix configures the adapter to parse a prefix from stdlib log events. If -// you provide a non-empty prefix to the stdlib logger, then your should provide -// that same prefix to the adapter via this option. -// -// By default, the prefix isn't included in the msg key. Set joinPrefixToMsg to -// true if you want to include the parsed prefix in the msg. -func Prefix(prefix string, joinPrefixToMsg bool) StdlibAdapterOption { - return log.Prefix(prefix, joinPrefixToMsg) -} - -// NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed -// logger. It's designed to be passed to log.SetOutput. -func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer { - return log.NewStdlibAdapter(logger, options...) -} diff --git a/vendor/github.com/go-kit/kit/log/sync.go b/vendor/github.com/go-kit/kit/log/sync.go deleted file mode 100644 index bcfee2bfd2b..00000000000 --- a/vendor/github.com/go-kit/kit/log/sync.go +++ /dev/null @@ -1,37 +0,0 @@ -package log - -import ( - "io" - - "github.com/go-kit/log" -) - -// SwapLogger wraps another logger that may be safely replaced while other -// goroutines use the SwapLogger concurrently. The zero value for a SwapLogger -// will discard all log events without error. -// -// SwapLogger serves well as a package global logger that can be changed by -// importers. -type SwapLogger = log.SwapLogger - -// NewSyncWriter returns a new writer that is safe for concurrent use by -// multiple goroutines. Writes to the returned writer are passed on to w. If -// another write is already in progress, the calling goroutine blocks until -// the writer is available. -// -// If w implements the following interface, so does the returned writer. -// -// interface { -// Fd() uintptr -// } -func NewSyncWriter(w io.Writer) io.Writer { - return log.NewSyncWriter(w) -} - -// NewSyncLogger returns a logger that synchronizes concurrent use of the -// wrapped logger. When multiple goroutines use the SyncLogger concurrently -// only one goroutine will be allowed to log to the wrapped logger at a time. -// The other goroutines will block until the logger is available. -func NewSyncLogger(logger Logger) Logger { - return log.NewSyncLogger(logger) -} diff --git a/vendor/github.com/go-kit/kit/log/value.go b/vendor/github.com/go-kit/kit/log/value.go deleted file mode 100644 index 96d783bd5d9..00000000000 --- a/vendor/github.com/go-kit/kit/log/value.go +++ /dev/null @@ -1,52 +0,0 @@ -package log - -import ( - "time" - - "github.com/go-kit/log" -) - -// A Valuer generates a log value. When passed to With, WithPrefix, or -// WithSuffix in a value element (odd indexes), it represents a dynamic -// value which is re-evaluated with each log event. -type Valuer = log.Valuer - -// Timestamp returns a timestamp Valuer. It invokes the t function to get the -// time; unless you are doing something tricky, pass time.Now. 
-// -// Most users will want to use DefaultTimestamp or DefaultTimestampUTC, which -// are TimestampFormats that use the RFC3339Nano format. -func Timestamp(t func() time.Time) Valuer { - return log.Timestamp(t) -} - -// TimestampFormat returns a timestamp Valuer with a custom time format. It -// invokes the t function to get the time to format; unless you are doing -// something tricky, pass time.Now. The layout string is passed to -// Time.Format. -// -// Most users will want to use DefaultTimestamp or DefaultTimestampUTC, which -// are TimestampFormats that use the RFC3339Nano format. -func TimestampFormat(t func() time.Time, layout string) Valuer { - return log.TimestampFormat(t, layout) -} - -// Caller returns a Valuer that returns a file and line from a specified depth -// in the callstack. Users will probably want to use DefaultCaller. -func Caller(depth int) Valuer { - return log.Caller(depth) -} - -var ( - // DefaultTimestamp is a Valuer that returns the current wallclock time, - // respecting time zones, when bound. - DefaultTimestamp = log.DefaultTimestamp - - // DefaultTimestampUTC is a Valuer that returns the current time in UTC - // when bound. - DefaultTimestampUTC = log.DefaultTimestampUTC - - // DefaultCaller is a Valuer that returns the file and line where the Log - // method was invoked. It can only be used with log.With. - DefaultCaller = log.DefaultCaller -) diff --git a/vendor/github.com/go-kit/log/.gitignore b/vendor/github.com/go-kit/log/.gitignore deleted file mode 100644 index 66fd13c903c..00000000000 --- a/vendor/github.com/go-kit/log/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, built with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Dependency directories (remove the comment below to include it) -# vendor/ diff --git a/vendor/github.com/go-kit/log/LICENSE b/vendor/github.com/go-kit/log/LICENSE deleted file mode 100644 index bb5bdb9cb8c..00000000000 --- a/vendor/github.com/go-kit/log/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2021 Go kit - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
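The deletions above and below drop the vendored go-kit/kit/log compatibility shims and the go-kit/log package itself from the vendor tree. For orientation only, here is a minimal sketch, pieced together from the deleted doc comments and README shown in these hunks, of how a caller builds a leveled, timestamped logger with the removed packages; it assumes the upstream github.com/go-kit/log module is still importable and is purely illustrative, not part of the patch.

```go
package main

import (
	"os"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
)

func main() {
	// logfmt output to stderr, wrapped in NewSyncWriter so concurrent
	// goroutines don't interleave writes, as described in the deleted README.
	var logger log.Logger
	logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

	// Squelch debug-level events and attach timestamp/caller valuers.
	logger = level.NewFilter(logger, level.AllowInfo())
	logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)

	level.Info(logger).Log("msg", "starting")
	level.Debug(logger).Log("msg", "filtered out by AllowInfo") // squelched by the filter
}
```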
diff --git a/vendor/github.com/go-kit/log/README.md b/vendor/github.com/go-kit/log/README.md deleted file mode 100644 index 8067794657c..00000000000 --- a/vendor/github.com/go-kit/log/README.md +++ /dev/null @@ -1,156 +0,0 @@ -# package log - -[![Go Reference](https://pkg.go.dev/badge/github.com/go-kit/log.svg)](https://pkg.go.dev/github.com/go-kit/log) -[![Go Report Card](https://goreportcard.com/badge/go-kit/log)](https://goreportcard.com/report/go-kit/log) -[![GitHub Actions](https://github.com/go-kit/log/actions/workflows/test.yml/badge.svg)](https://github.com/go-kit/log/actions/workflows/test.yml) -[![Coverage Status](https://coveralls.io/repos/github/go-kit/log/badge.svg?branch=main)](https://coveralls.io/github/go-kit/log?branch=main) - -`package log` provides a minimal interface for structured logging in services. -It may be wrapped to encode conventions, enforce type-safety, provide leveled -logging, and so on. It can be used for both typical application log events, -and log-structured data streams. - -## Structured logging - -Structured logging is, basically, conceding to the reality that logs are -_data_, and warrant some level of schematic rigor. Using a stricter, -key/value-oriented message format for our logs, containing contextual and -semantic information, makes it much easier to get insight into the -operational activity of the systems we build. Consequently, `package log` is -of the strong belief that "[the benefits of structured logging outweigh the -minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)". - -Migrating from unstructured to structured logging is probably a lot easier -than you'd expect. - -```go -// Unstructured -log.Printf("HTTP server listening on %s", addr) - -// Structured -logger.Log("transport", "HTTP", "addr", addr, "msg", "listening") -``` - -## Usage - -### Typical application logging - -```go -w := log.NewSyncWriter(os.Stderr) -logger := log.NewLogfmtLogger(w) -logger.Log("question", "what is the meaning of life?", "answer", 42) - -// Output: -// question="what is the meaning of life?" answer=42 -``` - -### Contextual Loggers - -```go -func main() { - var logger log.Logger - logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) - logger = log.With(logger, "instance_id", 123) - - logger.Log("msg", "starting") - NewWorker(log.With(logger, "component", "worker")).Run() - NewSlacker(log.With(logger, "component", "slacker")).Run() -} - -// Output: -// instance_id=123 msg=starting -// instance_id=123 component=worker msg=running -// instance_id=123 component=slacker msg=running -``` - -### Interact with stdlib logger - -Redirect stdlib logger to Go kit logger. - -```go -import ( - "os" - stdlog "log" - kitlog "github.com/go-kit/log" -) - -func main() { - logger := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stdout)) - stdlog.SetOutput(kitlog.NewStdlibAdapter(logger)) - stdlog.Print("I sure like pie") -} - -// Output: -// {"msg":"I sure like pie","ts":"2016/01/01 12:34:56"} -``` - -Or, if, for legacy reasons, you need to pipe all of your logging through the -stdlib log package, you can redirect Go kit logger to the stdlib logger. 
- -```go -logger := kitlog.NewLogfmtLogger(kitlog.StdlibWriter{}) -logger.Log("legacy", true, "msg", "at least it's something") - -// Output: -// 2016/01/01 12:34:56 legacy=true msg="at least it's something" -``` - -### Timestamps and callers - -```go -var logger log.Logger -logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) -logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) - -logger.Log("msg", "hello") - -// Output: -// ts=2016-01-01T12:34:56Z caller=main.go:15 msg=hello -``` - -## Levels - -Log levels are supported via the [level package](https://godoc.org/github.com/go-kit/log/level). - -## Supported output formats - -- [Logfmt](https://brandur.org/logfmt) ([see also](https://blog.codeship.com/logfmt-a-log-format-thats-easy-to-read-and-write)) -- JSON - -## Enhancements - -`package log` is centered on the one-method Logger interface. - -```go -type Logger interface { - Log(keyvals ...interface{}) error -} -``` - -This interface, and its supporting code like is the product of much iteration -and evaluation. For more details on the evolution of the Logger interface, -see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1), -a talk by [Chris Hines](https://github.com/ChrisHines). -Also, please see -[#63](https://github.com/go-kit/kit/issues/63), -[#76](https://github.com/go-kit/kit/pull/76), -[#131](https://github.com/go-kit/kit/issues/131), -[#157](https://github.com/go-kit/kit/pull/157), -[#164](https://github.com/go-kit/kit/issues/164), and -[#252](https://github.com/go-kit/kit/pull/252) -to review historical conversations about package log and the Logger interface. - -Value-add packages and suggestions, -like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/log/level), -are of course welcome. Good proposals should - -- Be composable with [contextual loggers](https://godoc.org/github.com/go-kit/log#With), -- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/log#Caller) in any wrapped contextual loggers, and -- Be friendly to packages that accept only an unadorned log.Logger. - -## Benchmarks & comparisons - -There are a few Go logging benchmarks and comparisons that include Go kit's package log. - -- [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) includes kit/log -- [uber-common/zap](https://github.com/uber-common/zap), a zero-alloc logging library, includes a comparison with kit/log diff --git a/vendor/github.com/go-kit/log/doc.go b/vendor/github.com/go-kit/log/doc.go deleted file mode 100644 index f744382fe49..00000000000 --- a/vendor/github.com/go-kit/log/doc.go +++ /dev/null @@ -1,116 +0,0 @@ -// Package log provides a structured logger. -// -// Structured logging produces logs easily consumed later by humans or -// machines. Humans might be interested in debugging errors, or tracing -// specific requests. Machines might be interested in counting interesting -// events, or aggregating information for off-line processing. In both cases, -// it is important that the log messages are structured and actionable. -// Package log is designed to encourage both of these best practices. -// -// Basic Usage -// -// The fundamental interface is Logger. Loggers create log events from -// key/value data. The Logger interface has a single method, Log, which -// accepts a sequence of alternating key/value pairs, which this package names -// keyvals. 
-// -// type Logger interface { -// Log(keyvals ...interface{}) error -// } -// -// Here is an example of a function using a Logger to create log events. -// -// func RunTask(task Task, logger log.Logger) string { -// logger.Log("taskID", task.ID, "event", "starting task") -// ... -// logger.Log("taskID", task.ID, "event", "task complete") -// } -// -// The keys in the above example are "taskID" and "event". The values are -// task.ID, "starting task", and "task complete". Every key is followed -// immediately by its value. -// -// Keys are usually plain strings. Values may be any type that has a sensible -// encoding in the chosen log format. With structured logging it is a good -// idea to log simple values without formatting them. This practice allows -// the chosen logger to encode values in the most appropriate way. -// -// Contextual Loggers -// -// A contextual logger stores keyvals that it includes in all log events. -// Building appropriate contextual loggers reduces repetition and aids -// consistency in the resulting log output. With, WithPrefix, and WithSuffix -// add context to a logger. We can use With to improve the RunTask example. -// -// func RunTask(task Task, logger log.Logger) string { -// logger = log.With(logger, "taskID", task.ID) -// logger.Log("event", "starting task") -// ... -// taskHelper(task.Cmd, logger) -// ... -// logger.Log("event", "task complete") -// } -// -// The improved version emits the same log events as the original for the -// first and last calls to Log. Passing the contextual logger to taskHelper -// enables each log event created by taskHelper to include the task.ID even -// though taskHelper does not have access to that value. Using contextual -// loggers this way simplifies producing log output that enables tracing the -// life cycle of individual tasks. (See the Contextual example for the full -// code of the above snippet.) -// -// Dynamic Contextual Values -// -// A Valuer function stored in a contextual logger generates a new value each -// time an event is logged. The Valuer example demonstrates how this feature -// works. -// -// Valuers provide the basis for consistently logging timestamps and source -// code location. The log package defines several valuers for that purpose. -// See Timestamp, DefaultTimestamp, DefaultTimestampUTC, Caller, and -// DefaultCaller. A common logger initialization sequence that ensures all log -// entries contain a timestamp and source location looks like this: -// -// logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) -// logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) -// -// Concurrent Safety -// -// Applications with multiple goroutines want each log event written to the -// same logger to remain separate from other log events. Package log provides -// two simple solutions for concurrent safe logging. -// -// NewSyncWriter wraps an io.Writer and serializes each call to its Write -// method. Using a SyncWriter has the benefit that the smallest practical -// portion of the logging logic is performed within a mutex, but it requires -// the formatting Logger to make only one call to Write per log event. -// -// NewSyncLogger wraps any Logger and serializes each call to its Log method. -// Using a SyncLogger has the benefit that it guarantees each log event is -// handled atomically within the wrapped logger, but it typically serializes -// both the formatting and output logic. 
Use a SyncLogger if the formatting -// logger may perform multiple writes per log event. -// -// Error Handling -// -// This package relies on the practice of wrapping or decorating loggers with -// other loggers to provide composable pieces of functionality. It also means -// that Logger.Log must return an error because some -// implementations—especially those that output log data to an io.Writer—may -// encounter errors that cannot be handled locally. This in turn means that -// Loggers that wrap other loggers should return errors from the wrapped -// logger up the stack. -// -// Fortunately, the decorator pattern also provides a way to avoid the -// necessity to check for errors every time an application calls Logger.Log. -// An application required to panic whenever its Logger encounters -// an error could initialize its logger as follows. -// -// fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) -// logger := log.LoggerFunc(func(keyvals ...interface{}) error { -// if err := fmtlogger.Log(keyvals...); err != nil { -// panic(err) -// } -// return nil -// }) -package log diff --git a/vendor/github.com/go-kit/log/json_logger.go b/vendor/github.com/go-kit/log/json_logger.go deleted file mode 100644 index d0faed4f098..00000000000 --- a/vendor/github.com/go-kit/log/json_logger.go +++ /dev/null @@ -1,91 +0,0 @@ -package log - -import ( - "encoding" - "encoding/json" - "fmt" - "io" - "reflect" -) - -type jsonLogger struct { - io.Writer -} - -// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a -// single JSON object. Each log event produces no more than one call to -// w.Write. The passed Writer must be safe for concurrent use by multiple -// goroutines if the returned Logger will be used concurrently. -func NewJSONLogger(w io.Writer) Logger { - return &jsonLogger{w} -} - -func (l *jsonLogger) Log(keyvals ...interface{}) error { - n := (len(keyvals) + 1) / 2 // +1 to handle case when len is odd - m := make(map[string]interface{}, n) - for i := 0; i < len(keyvals); i += 2 { - k := keyvals[i] - var v interface{} = ErrMissingValue - if i+1 < len(keyvals) { - v = keyvals[i+1] - } - merge(m, k, v) - } - enc := json.NewEncoder(l.Writer) - enc.SetEscapeHTML(false) - return enc.Encode(m) -} - -func merge(dst map[string]interface{}, k, v interface{}) { - var key string - switch x := k.(type) { - case string: - key = x - case fmt.Stringer: - key = safeString(x) - default: - key = fmt.Sprint(x) - } - - // We want json.Marshaler and encoding.TextMarshaller to take priority over - // err.Error() and v.String(). But json.Marshall (called later) does that by - // default so we force a no-op if it's one of those 2 case. 
- switch x := v.(type) { - case json.Marshaler: - case encoding.TextMarshaler: - case error: - v = safeError(x) - case fmt.Stringer: - v = safeString(x) - } - - dst[key] = v -} - -func safeString(str fmt.Stringer) (s string) { - defer func() { - if panicVal := recover(); panicVal != nil { - if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() { - s = "NULL" - } else { - s = fmt.Sprintf("PANIC in String method: %v", panicVal) - } - } - }() - s = str.String() - return -} - -func safeError(err error) (s interface{}) { - defer func() { - if panicVal := recover(); panicVal != nil { - if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { - s = nil - } else { - s = fmt.Sprintf("PANIC in Error method: %v", panicVal) - } - } - }() - s = err.Error() - return -} diff --git a/vendor/github.com/go-kit/log/level/doc.go b/vendor/github.com/go-kit/log/level/doc.go deleted file mode 100644 index fd681dcf922..00000000000 --- a/vendor/github.com/go-kit/log/level/doc.go +++ /dev/null @@ -1,33 +0,0 @@ -// Package level implements leveled logging on top of Go kit's log package. To -// use the level package, create a logger as per normal in your func main, and -// wrap it with level.NewFilter. -// -// var logger log.Logger -// logger = log.NewLogfmtLogger(os.Stderr) -// logger = level.NewFilter(logger, level.AllowInfo()) // <-- -// logger = log.With(logger, "ts", log.DefaultTimestampUTC) -// -// It's also possible to configure log level from a string. For instance from -// a flag, environment variable or configuration file. -// -// fs := flag.NewFlagSet("myprogram") -// lvl := fs.String("log", "info", "debug, info, warn, error") -// -// var logger log.Logger -// logger = log.NewLogfmtLogger(os.Stderr) -// logger = level.NewFilter(logger, level.Allow(level.ParseDefault(*lvl, level.InfoValue()))) // <-- -// logger = log.With(logger, "ts", log.DefaultTimestampUTC) -// -// Then, at the callsites, use one of the level.Debug, Info, Warn, or Error -// helper methods to emit leveled log events. -// -// logger.Log("foo", "bar") // as normal, no level -// level.Debug(logger).Log("request_id", reqID, "trace_data", trace.Get()) -// if value > 100 { -// level.Error(logger).Log("value", value) -// } -// -// NewFilter allows precise control over what happens when a log event is -// emitted without a level key, or if a squelched level is used. Check the -// Option functions for details. -package level diff --git a/vendor/github.com/go-kit/log/level/level.go b/vendor/github.com/go-kit/log/level/level.go deleted file mode 100644 index c641d985524..00000000000 --- a/vendor/github.com/go-kit/log/level/level.go +++ /dev/null @@ -1,256 +0,0 @@ -package level - -import ( - "errors" - "strings" - - "github.com/go-kit/log" -) - -// ErrInvalidLevelString is returned whenever an invalid string is passed to Parse. -var ErrInvalidLevelString = errors.New("invalid level string") - -// Error returns a logger that includes a Key/ErrorValue pair. -func Error(logger log.Logger) log.Logger { - return log.WithPrefix(logger, Key(), ErrorValue()) -} - -// Warn returns a logger that includes a Key/WarnValue pair. -func Warn(logger log.Logger) log.Logger { - return log.WithPrefix(logger, Key(), WarnValue()) -} - -// Info returns a logger that includes a Key/InfoValue pair. -func Info(logger log.Logger) log.Logger { - return log.WithPrefix(logger, Key(), InfoValue()) -} - -// Debug returns a logger that includes a Key/DebugValue pair. 
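The removed json_logger.go and the Error Handling section of the removed doc.go describe JSON output and the decorator approach to write errors. A hedged sketch of the two together; the event keys and the panic-on-error policy are illustrative choices, not requirements of the package:

```go
package main

import (
	"os"

	"github.com/go-kit/log"
)

func main() {
	// Encode each log event as a single JSON object on its own line.
	jsonLogger := log.NewJSONLogger(log.NewSyncWriter(os.Stdout))

	// Wrap it so a write error panics instead of being silently ignored,
	// following the decorator pattern described in the removed doc.go.
	logger := log.LoggerFunc(func(keyvals ...interface{}) error {
		if err := jsonLogger.Log(keyvals...); err != nil {
			panic(err)
		}
		return nil
	})

	logger.Log("event", "payment received", "amount_cents", 1250)
}
```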
-func Debug(logger log.Logger) log.Logger { - return log.WithPrefix(logger, Key(), DebugValue()) -} - -// NewFilter wraps next and implements level filtering. See the commentary on -// the Option functions for a detailed description of how to configure levels. -// If no options are provided, all leveled log events created with Debug, -// Info, Warn or Error helper methods are squelched and non-leveled log -// events are passed to next unmodified. -func NewFilter(next log.Logger, options ...Option) log.Logger { - l := &logger{ - next: next, - } - for _, option := range options { - option(l) - } - return l -} - -type logger struct { - next log.Logger - allowed level - squelchNoLevel bool - errNotAllowed error - errNoLevel error -} - -func (l *logger) Log(keyvals ...interface{}) error { - var hasLevel, levelAllowed bool - for i := 1; i < len(keyvals); i += 2 { - if v, ok := keyvals[i].(*levelValue); ok { - hasLevel = true - levelAllowed = l.allowed&v.level != 0 - break - } - } - if !hasLevel && l.squelchNoLevel { - return l.errNoLevel - } - if hasLevel && !levelAllowed { - return l.errNotAllowed - } - return l.next.Log(keyvals...) -} - -// Option sets a parameter for the leveled logger. -type Option func(*logger) - -// Allow the provided log level to pass. -func Allow(v Value) Option { - switch v { - case debugValue: - return AllowDebug() - case infoValue: - return AllowInfo() - case warnValue: - return AllowWarn() - case errorValue: - return AllowError() - default: - return AllowNone() - } -} - -// AllowAll is an alias for AllowDebug. -func AllowAll() Option { - return AllowDebug() -} - -// AllowDebug allows error, warn, info and debug level log events to pass. -func AllowDebug() Option { - return allowed(levelError | levelWarn | levelInfo | levelDebug) -} - -// AllowInfo allows error, warn and info level log events to pass. -func AllowInfo() Option { - return allowed(levelError | levelWarn | levelInfo) -} - -// AllowWarn allows error and warn level log events to pass. -func AllowWarn() Option { - return allowed(levelError | levelWarn) -} - -// AllowError allows only error level log events to pass. -func AllowError() Option { - return allowed(levelError) -} - -// AllowNone allows no leveled log events to pass. -func AllowNone() Option { - return allowed(0) -} - -func allowed(allowed level) Option { - return func(l *logger) { l.allowed = allowed } -} - -// Parse a string to its corresponding level value. Valid strings are "debug", -// "info", "warn", and "error". Strings are normalized via strings.TrimSpace and -// strings.ToLower. -func Parse(level string) (Value, error) { - switch strings.TrimSpace(strings.ToLower(level)) { - case debugValue.name: - return debugValue, nil - case infoValue.name: - return infoValue, nil - case warnValue.name: - return warnValue, nil - case errorValue.name: - return errorValue, nil - default: - return nil, ErrInvalidLevelString - } -} - -// ParseDefault calls Parse and returns the default Value on error. -func ParseDefault(level string, def Value) Value { - v, err := Parse(level) - if err != nil { - return def - } - return v -} - -// ErrNotAllowed sets the error to return from Log when it squelches a log -// event disallowed by the configured Allow[Level] option. By default, -// ErrNotAllowed is nil; in this case the log event is squelched with no -// error. 
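The level package removed above filters events by severity. A small sketch of the flow its doc comment describes, assuming the level string would normally come from a flag or configuration; "info" is just a placeholder here:

```go
package main

import (
	"os"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
)

func main() {
	var logger log.Logger = log.NewLogfmtLogger(os.Stderr)

	// Parse a level name and fall back to info when the string is unrecognised.
	lvl := level.ParseDefault("info", level.InfoValue())
	logger = level.NewFilter(logger, level.Allow(lvl))

	level.Debug(logger).Log("msg", "squelched by the info filter")
	level.Info(logger).Log("msg", "this one is emitted")
	logger.Log("msg", "no level key, passed through unmodified by default")
}
```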
-func ErrNotAllowed(err error) Option { - return func(l *logger) { l.errNotAllowed = err } -} - -// SquelchNoLevel instructs Log to squelch log events with no level, so that -// they don't proceed through to the wrapped logger. If SquelchNoLevel is set -// to true and a log event is squelched in this way, the error value -// configured with ErrNoLevel is returned to the caller. -func SquelchNoLevel(squelch bool) Option { - return func(l *logger) { l.squelchNoLevel = squelch } -} - -// ErrNoLevel sets the error to return from Log when it squelches a log event -// with no level. By default, ErrNoLevel is nil; in this case the log event is -// squelched with no error. -func ErrNoLevel(err error) Option { - return func(l *logger) { l.errNoLevel = err } -} - -// NewInjector wraps next and returns a logger that adds a Key/level pair to -// the beginning of log events that don't already contain a level. In effect, -// this gives a default level to logs without a level. -func NewInjector(next log.Logger, level Value) log.Logger { - return &injector{ - next: next, - level: level, - } -} - -type injector struct { - next log.Logger - level interface{} -} - -func (l *injector) Log(keyvals ...interface{}) error { - for i := 1; i < len(keyvals); i += 2 { - if _, ok := keyvals[i].(*levelValue); ok { - return l.next.Log(keyvals...) - } - } - kvs := make([]interface{}, len(keyvals)+2) - kvs[0], kvs[1] = key, l.level - copy(kvs[2:], keyvals) - return l.next.Log(kvs...) -} - -// Value is the interface that each of the canonical level values implement. -// It contains unexported methods that prevent types from other packages from -// implementing it and guaranteeing that NewFilter can distinguish the levels -// defined in this package from all other values. -type Value interface { - String() string - levelVal() -} - -// Key returns the unique key added to log events by the loggers in this -// package. -func Key() interface{} { return key } - -// ErrorValue returns the unique value added to log events by Error. -func ErrorValue() Value { return errorValue } - -// WarnValue returns the unique value added to log events by Warn. -func WarnValue() Value { return warnValue } - -// InfoValue returns the unique value added to log events by Info. -func InfoValue() Value { return infoValue } - -// DebugValue returns the unique value added to log events by Debug. -func DebugValue() Value { return debugValue } - -var ( - // key is of type interface{} so that it allocates once during package - // initialization and avoids allocating every time the value is added to a - // []interface{} later. - key interface{} = "level" - - errorValue = &levelValue{level: levelError, name: "error"} - warnValue = &levelValue{level: levelWarn, name: "warn"} - infoValue = &levelValue{level: levelInfo, name: "info"} - debugValue = &levelValue{level: levelDebug, name: "debug"} -) - -type level byte - -const ( - levelDebug level = 1 << iota - levelInfo - levelWarn - levelError -) - -type levelValue struct { - name string - level -} - -func (v *levelValue) String() string { return v.name } -func (v *levelValue) levelVal() {} diff --git a/vendor/github.com/go-kit/log/log.go b/vendor/github.com/go-kit/log/log.go deleted file mode 100644 index 62e11adace5..00000000000 --- a/vendor/github.com/go-kit/log/log.go +++ /dev/null @@ -1,179 +0,0 @@ -package log - -import "errors" - -// Logger is the fundamental interface for all log operations. Log creates a -// log event from keyvals, a variadic sequence of alternating keys and values. 
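Beyond plain filtering, the removed level.go also offers SquelchNoLevel, ErrNoLevel, and NewInjector for events that carry no level key. A sketch of how those options might be combined; the error text is an arbitrary example:

```go
package main

import (
	"errors"
	"os"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
)

func main() {
	base := log.NewLogfmtLogger(os.Stderr)

	// Drop events that carry no level at all, returning a distinct error...
	filtered := level.NewFilter(base,
		level.AllowInfo(),
		level.SquelchNoLevel(true),
		level.ErrNoLevel(errors.New("log event has no level")),
	)

	// ...or give unleveled events a default level up front so they survive the filter.
	logger := level.NewInjector(filtered, level.InfoValue())

	logger.Log("msg", "unleveled, but injected as info and therefore kept")
	level.Debug(logger).Log("msg", "still squelched by AllowInfo")
}
```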
-// Implementations must be safe for concurrent use by multiple goroutines. In -// particular, any implementation of Logger that appends to keyvals or -// modifies or retains any of its elements must make a copy first. -type Logger interface { - Log(keyvals ...interface{}) error -} - -// ErrMissingValue is appended to keyvals slices with odd length to substitute -// the missing value. -var ErrMissingValue = errors.New("(MISSING)") - -// With returns a new contextual logger with keyvals prepended to those passed -// to calls to Log. If logger is also a contextual logger created by With, -// WithPrefix, or WithSuffix, keyvals is appended to the existing context. -// -// The returned Logger replaces all value elements (odd indexes) containing a -// Valuer with their generated value for each call to its Log method. -func With(logger Logger, keyvals ...interface{}) Logger { - if len(keyvals) == 0 { - return logger - } - l := newContext(logger) - kvs := append(l.keyvals, keyvals...) - if len(kvs)%2 != 0 { - kvs = append(kvs, ErrMissingValue) - } - return &context{ - logger: l.logger, - // Limiting the capacity of the stored keyvals ensures that a new - // backing array is created if the slice must grow in Log or With. - // Using the extra capacity without copying risks a data race that - // would violate the Logger interface contract. - keyvals: kvs[:len(kvs):len(kvs)], - hasValuer: l.hasValuer || containsValuer(keyvals), - sKeyvals: l.sKeyvals, - sHasValuer: l.sHasValuer, - } -} - -// WithPrefix returns a new contextual logger with keyvals prepended to those -// passed to calls to Log. If logger is also a contextual logger created by -// With, WithPrefix, or WithSuffix, keyvals is prepended to the existing context. -// -// The returned Logger replaces all value elements (odd indexes) containing a -// Valuer with their generated value for each call to its Log method. -func WithPrefix(logger Logger, keyvals ...interface{}) Logger { - if len(keyvals) == 0 { - return logger - } - l := newContext(logger) - // Limiting the capacity of the stored keyvals ensures that a new - // backing array is created if the slice must grow in Log or With. - // Using the extra capacity without copying risks a data race that - // would violate the Logger interface contract. - n := len(l.keyvals) + len(keyvals) - if len(keyvals)%2 != 0 { - n++ - } - kvs := make([]interface{}, 0, n) - kvs = append(kvs, keyvals...) - if len(kvs)%2 != 0 { - kvs = append(kvs, ErrMissingValue) - } - kvs = append(kvs, l.keyvals...) - return &context{ - logger: l.logger, - keyvals: kvs, - hasValuer: l.hasValuer || containsValuer(keyvals), - sKeyvals: l.sKeyvals, - sHasValuer: l.sHasValuer, - } -} - -// WithSuffix returns a new contextual logger with keyvals appended to those -// passed to calls to Log. If logger is also a contextual logger created by -// With, WithPrefix, or WithSuffix, keyvals is appended to the existing context. -// -// The returned Logger replaces all value elements (odd indexes) containing a -// Valuer with their generated value for each call to its Log method. -func WithSuffix(logger Logger, keyvals ...interface{}) Logger { - if len(keyvals) == 0 { - return logger - } - l := newContext(logger) - // Limiting the capacity of the stored keyvals ensures that a new - // backing array is created if the slice must grow in Log or With. - // Using the extra capacity without copying risks a data race that - // would violate the Logger interface contract. 
- n := len(l.sKeyvals) + len(keyvals) - if len(keyvals)%2 != 0 { - n++ - } - kvs := make([]interface{}, 0, n) - kvs = append(kvs, keyvals...) - if len(kvs)%2 != 0 { - kvs = append(kvs, ErrMissingValue) - } - kvs = append(l.sKeyvals, kvs...) - return &context{ - logger: l.logger, - keyvals: l.keyvals, - hasValuer: l.hasValuer, - sKeyvals: kvs, - sHasValuer: l.sHasValuer || containsValuer(keyvals), - } -} - -// context is the Logger implementation returned by With, WithPrefix, and -// WithSuffix. It wraps a Logger and holds keyvals that it includes in all -// log events. Its Log method calls bindValues to generate values for each -// Valuer in the context keyvals. -// -// A context must always have the same number of stack frames between calls to -// its Log method and the eventual binding of Valuers to their value. This -// requirement comes from the functional requirement to allow a context to -// resolve application call site information for a Caller stored in the -// context. To do this we must be able to predict the number of logging -// functions on the stack when bindValues is called. -// -// Two implementation details provide the needed stack depth consistency. -// -// 1. newContext avoids introducing an additional layer when asked to -// wrap another context. -// 2. With, WithPrefix, and WithSuffix avoid introducing an additional -// layer by returning a newly constructed context with a merged keyvals -// rather than simply wrapping the existing context. -type context struct { - logger Logger - keyvals []interface{} - sKeyvals []interface{} // suffixes - hasValuer bool - sHasValuer bool -} - -func newContext(logger Logger) *context { - if c, ok := logger.(*context); ok { - return c - } - return &context{logger: logger} -} - -// Log replaces all value elements (odd indexes) containing a Valuer in the -// stored context with their generated value, appends keyvals, and passes the -// result to the wrapped Logger. -func (l *context) Log(keyvals ...interface{}) error { - kvs := append(l.keyvals, keyvals...) - if len(kvs)%2 != 0 { - kvs = append(kvs, ErrMissingValue) - } - if l.hasValuer { - // If no keyvals were appended above then we must copy l.keyvals so - // that future log events will reevaluate the stored Valuers. - if len(keyvals) == 0 { - kvs = append([]interface{}{}, l.keyvals...) - } - bindValues(kvs[:(len(l.keyvals))]) - } - kvs = append(kvs, l.sKeyvals...) - if l.sHasValuer { - bindValues(kvs[len(kvs)-len(l.sKeyvals):]) - } - return l.logger.Log(kvs...) -} - -// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If -// f is a function with the appropriate signature, LoggerFunc(f) is a Logger -// object that calls f. -type LoggerFunc func(...interface{}) error - -// Log implements Logger by calling f(keyvals...). -func (f LoggerFunc) Log(keyvals ...interface{}) error { - return f(keyvals...) 
-} diff --git a/vendor/github.com/go-kit/log/logfmt_logger.go b/vendor/github.com/go-kit/log/logfmt_logger.go deleted file mode 100644 index a00305298b8..00000000000 --- a/vendor/github.com/go-kit/log/logfmt_logger.go +++ /dev/null @@ -1,62 +0,0 @@ -package log - -import ( - "bytes" - "io" - "sync" - - "github.com/go-logfmt/logfmt" -) - -type logfmtEncoder struct { - *logfmt.Encoder - buf bytes.Buffer -} - -func (l *logfmtEncoder) Reset() { - l.Encoder.Reset() - l.buf.Reset() -} - -var logfmtEncoderPool = sync.Pool{ - New: func() interface{} { - var enc logfmtEncoder - enc.Encoder = logfmt.NewEncoder(&enc.buf) - return &enc - }, -} - -type logfmtLogger struct { - w io.Writer -} - -// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in -// logfmt format. Each log event produces no more than one call to w.Write. -// The passed Writer must be safe for concurrent use by multiple goroutines if -// the returned Logger will be used concurrently. -func NewLogfmtLogger(w io.Writer) Logger { - return &logfmtLogger{w} -} - -func (l logfmtLogger) Log(keyvals ...interface{}) error { - enc := logfmtEncoderPool.Get().(*logfmtEncoder) - enc.Reset() - defer logfmtEncoderPool.Put(enc) - - if err := enc.EncodeKeyvals(keyvals...); err != nil { - return err - } - - // Add newline to the end of the buffer - if err := enc.EndRecord(); err != nil { - return err - } - - // The Logger interface requires implementations to be safe for concurrent - // use by multiple goroutines. For this implementation that means making - // only one call to l.w.Write() for each call to Log. - if _, err := l.w.Write(enc.buf.Bytes()); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/go-kit/log/nop_logger.go b/vendor/github.com/go-kit/log/nop_logger.go deleted file mode 100644 index 1047d626c43..00000000000 --- a/vendor/github.com/go-kit/log/nop_logger.go +++ /dev/null @@ -1,8 +0,0 @@ -package log - -type nopLogger struct{} - -// NewNopLogger returns a logger that doesn't do anything. -func NewNopLogger() Logger { return nopLogger{} } - -func (nopLogger) Log(...interface{}) error { return nil } diff --git a/vendor/github.com/go-kit/log/staticcheck.conf b/vendor/github.com/go-kit/log/staticcheck.conf deleted file mode 100644 index 528438b97d2..00000000000 --- a/vendor/github.com/go-kit/log/staticcheck.conf +++ /dev/null @@ -1 +0,0 @@ -checks = ["all"] diff --git a/vendor/github.com/go-kit/log/stdlib.go b/vendor/github.com/go-kit/log/stdlib.go deleted file mode 100644 index 0338edbe2ba..00000000000 --- a/vendor/github.com/go-kit/log/stdlib.go +++ /dev/null @@ -1,151 +0,0 @@ -package log - -import ( - "bytes" - "io" - "log" - "regexp" - "strings" -) - -// StdlibWriter implements io.Writer by invoking the stdlib log.Print. It's -// designed to be passed to a Go kit logger as the writer, for cases where -// it's necessary to redirect all Go kit log output to the stdlib logger. -// -// If you have any choice in the matter, you shouldn't use this. Prefer to -// redirect the stdlib log to the Go kit logger via NewStdlibAdapter. -type StdlibWriter struct{} - -// Write implements io.Writer. -func (w StdlibWriter) Write(p []byte) (int, error) { - log.Print(strings.TrimSpace(string(p))) - return len(p), nil -} - -// StdlibAdapter wraps a Logger and allows it to be passed to the stdlib -// logger's SetOutput. It will extract date/timestamps, filenames, and -// messages, and place them under relevant keys. 
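The removed log.go and nop_logger.go define WithPrefix, WithSuffix, and NewNopLogger. A short sketch of the resulting key ordering; the app and build values are made up for illustration:

```go
package main

import (
	"os"

	"github.com/go-kit/log"
)

func main() {
	logger := log.NewLogfmtLogger(os.Stdout)

	// WithPrefix keyvals come before, and WithSuffix keyvals after, whatever
	// is passed to each individual Log call.
	logger = log.WithPrefix(logger, "app", "billing")
	logger = log.WithSuffix(logger, "build", "abc123")

	logger.Log("msg", "invoice created")
	// Output (roughly): app=billing msg="invoice created" build=abc123

	// In tests, a no-op logger satisfies the same interface without output.
	var quiet log.Logger = log.NewNopLogger()
	quiet.Log("msg", "discarded")
}
```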
-type StdlibAdapter struct { - Logger - timestampKey string - fileKey string - messageKey string - prefix string - joinPrefixToMsg bool -} - -// StdlibAdapterOption sets a parameter for the StdlibAdapter. -type StdlibAdapterOption func(*StdlibAdapter) - -// TimestampKey sets the key for the timestamp field. By default, it's "ts". -func TimestampKey(key string) StdlibAdapterOption { - return func(a *StdlibAdapter) { a.timestampKey = key } -} - -// FileKey sets the key for the file and line field. By default, it's "caller". -func FileKey(key string) StdlibAdapterOption { - return func(a *StdlibAdapter) { a.fileKey = key } -} - -// MessageKey sets the key for the actual log message. By default, it's "msg". -func MessageKey(key string) StdlibAdapterOption { - return func(a *StdlibAdapter) { a.messageKey = key } -} - -// Prefix configures the adapter to parse a prefix from stdlib log events. If -// you provide a non-empty prefix to the stdlib logger, then your should provide -// that same prefix to the adapter via this option. -// -// By default, the prefix isn't included in the msg key. Set joinPrefixToMsg to -// true if you want to include the parsed prefix in the msg. -func Prefix(prefix string, joinPrefixToMsg bool) StdlibAdapterOption { - return func(a *StdlibAdapter) { a.prefix = prefix; a.joinPrefixToMsg = joinPrefixToMsg } -} - -// NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed -// logger. It's designed to be passed to log.SetOutput. -func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer { - a := StdlibAdapter{ - Logger: logger, - timestampKey: "ts", - fileKey: "caller", - messageKey: "msg", - } - for _, option := range options { - option(&a) - } - return a -} - -func (a StdlibAdapter) Write(p []byte) (int, error) { - p = a.handlePrefix(p) - - result := subexps(p) - keyvals := []interface{}{} - var timestamp string - if date, ok := result["date"]; ok && date != "" { - timestamp = date - } - if time, ok := result["time"]; ok && time != "" { - if timestamp != "" { - timestamp += " " - } - timestamp += time - } - if timestamp != "" { - keyvals = append(keyvals, a.timestampKey, timestamp) - } - if file, ok := result["file"]; ok && file != "" { - keyvals = append(keyvals, a.fileKey, file) - } - if msg, ok := result["msg"]; ok { - msg = a.handleMessagePrefix(msg) - keyvals = append(keyvals, a.messageKey, msg) - } - if err := a.Logger.Log(keyvals...); err != nil { - return 0, err - } - return len(p), nil -} - -func (a StdlibAdapter) handlePrefix(p []byte) []byte { - if a.prefix != "" { - p = bytes.TrimPrefix(p, []byte(a.prefix)) - } - return p -} - -func (a StdlibAdapter) handleMessagePrefix(msg string) string { - if a.prefix == "" { - return msg - } - - msg = strings.TrimPrefix(msg, a.prefix) - if a.joinPrefixToMsg { - msg = a.prefix + msg - } - return msg -} - -const ( - logRegexpDate = `(?P[0-9]{4}/[0-9]{2}/[0-9]{2})?[ ]?` - logRegexpTime = `(?P