diff --git a/.packit.yaml b/.packit.yaml
index b7e89186772..54f9d8e8327 100644
--- a/.packit.yaml
+++ b/.packit.yaml
@@ -106,9 +106,7 @@ jobs:
packages: [podman-fedora]
notifications: *packit_generic_failure_notification
targets:
- - fedora-rawhide
- - fedora-42
- - fedora-41
+ - fedora-all
tmt_plan: "/plans/system/*"
tf_extra_params:
environments:
@@ -144,13 +142,6 @@ jobs:
dist_git_branches: &fedora_targets
- fedora-all
- - job: propose_downstream
- trigger: release
- update_release: false
- packages: [podman-centos]
- dist_git_branches:
- - c10s
-
- job: koji_build
trigger: commit
packages: [podman-fedora]
diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index ce0e3e44d65..279bb52c946 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -26,6 +26,7 @@ Maintainers and Reviewers for the Skopeo and Buildah projects are found in their
| Jake Correnti | [jakecorrenti](https://github.com/jakecorrenti) | Reviewer | [Red Hat](https://github.com/RedHatOfficial) |
| Jason Greene | [n1hility](https://github.com/n1hility) | Reviewer | [Red Hat](https://github.com/RedHatOfficial) |
| Jhon Honce | [jwhonce](https://github.com/jwhonce) | Reviewer | [Red Hat](https://github.com/RedHatOfficial) |
+| Craig Loewen | [craigloewen-msft](https://github.com/craigloewen-msft) | Reviewer | [Microsoft](https://github.com/microsoft) |
| Urvashi Mohnani | [umohnani8](https://github.com/umohnani8) | Reviewer | [Red Hat](https://github.com/RedHatOfficial) |
| Aditya Rajan | [flouthoc](https://github.com/flouthoc) | Reviewer | [Red Hat](https://github.com/RedHatOfficial) |
| Jan Rodák | [Honny1](https://github.com/Honny1) | Reviewer | [Red Hat](https://github.com/RedHatOfficial) |
diff --git a/Makefile b/Makefile
index a72b802a84f..2eb099020f4 100644
--- a/Makefile
+++ b/Makefile
@@ -61,6 +61,7 @@ REMOTETAGS ?= remote exclude_graphdriver_btrfs containers_image_openpgp
BUILDTAGS ?= \
$(shell hack/apparmor_tag.sh) \
$(shell hack/btrfs_installed_tag.sh) \
+ $(shell hack/sqlite_tag.sh) \
$(shell hack/systemd_tag.sh) \
$(shell hack/libsubid_tag.sh) \
$(if $(filter linux,$(GOOS)), seccomp,)
@@ -361,7 +362,7 @@ $(IN_CONTAINER): %-in-container:
$(PODMANCMD) run --rm --env HOME=/root \
-v $(CURDIR):/src -w /src \
--security-opt label=disable \
- docker.io/library/golang:1.22 \
+ quay.io/libpod/validatepr:latest \
make $(*)
@@ -478,21 +479,7 @@ podman-testing: bin/podman-testing
.PHONY: generate-bindings
generate-bindings: .install.golangci-lint
-ifneq ($(GOOS),darwin)
$(GOCMD) generate ./pkg/bindings/... ;
-endif
-
-# DO NOT USE: use local-cross instead
-bin/podman.cross.%:
- TARGET="$*"; \
- GOOS="$${TARGET%%.*}"; \
- GOARCH="$${TARGET##*.}"; \
- CGO_ENABLED=0 \
- $(GO) build \
- $(BUILDFLAGS) \
- $(GO_LDFLAGS) '$(LDFLAGS_PODMAN)' \
- -tags '$(BUILDTAGS_CROSS)' \
- -o "$@" ./cmd/podman
.PHONY: local-cross
local-cross: $(CROSS_BUILD_TARGETS) ## Cross compile podman binary for multiple architectures
diff --git a/cmd/podman/common/build.go b/cmd/podman/common/build.go
index a213ffe596d..dec5133b820 100644
--- a/cmd/podman/common/build.go
+++ b/cmd/podman/common/build.go
@@ -523,7 +523,7 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *Buil
Annotations: flags.Annotation,
Args: args,
BlobDirectory: flags.BlobCache,
- BuildOutput: flags.BuildOutput,
+ BuildOutputs: flags.BuildOutputs,
CacheFrom: cacheFrom,
CacheTo: cacheTo,
CacheTTL: cacheTTL,
diff --git a/cmd/podman/images/buildx_inspect.go b/cmd/podman/images/buildx_inspect.go
new file mode 100644
index 00000000000..5fe1dab6073
--- /dev/null
+++ b/cmd/podman/images/buildx_inspect.go
@@ -0,0 +1,81 @@
+package images
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/containers/podman/v5/cmd/podman/registry"
+ "github.com/spf13/cobra"
+)
+
+type buildNode struct {
+ Name string
+ Endpoint string
+ Status string
+ BuildkitVersion string
+ Platforms []string
+}
+
+type buildxInspectOutput struct {
+ builderName string
+ driverName string
+ Nodes []buildNode
+}
+
+var buildxInspectCmd = &cobra.Command{
+ Use: "inspect",
+ Short: "Inspects build capabilities",
+ Long: "Displays information about the current builder instance (compatibility with Docker buildx inspect)",
+ RunE: runBuildxInspect,
+ Example: `podman buildx inspect
+ podman buildx inspect --bootstrap`,
+}
+
+func init() {
+ buildxInspectCmd.Flags().Bool("bootstrap", false, "Currently a No Op for podman")
+ registry.Commands = append(registry.Commands, registry.CliCommand{
+ Command: buildxInspectCmd,
+ Parent: buildxCmd,
+ })
+}
+
+func runBuildxInspect(cmd *cobra.Command, args []string) error {
+ info, err := registry.ContainerEngine().Info(registry.Context())
+
+ if err != nil {
+ return fmt.Errorf("retrieving podman information: %w", err)
+ }
+
+ nativePlatform := fmt.Sprintf("%s/%s", info.Host.OS, info.Host.Arch)
+
+ // Constants are based on default values for Docker buildx inspect.
+ defaultNode := buildNode{
+ Name: "default",
+ Endpoint: "default",
+ Status: "running",
+ BuildkitVersion: "N/A",
+ Platforms: []string{nativePlatform},
+ }
+
+ defaultNode.Platforms = append(defaultNode.Platforms, info.Host.EmulatedArchitectures...)
+
+ out := buildxInspectOutput{
+ builderName: "default",
+ driverName: "podman",
+ Nodes: []buildNode{defaultNode},
+ }
+
+ fmt.Printf("Name: %s\n", out.builderName)
+ fmt.Printf("Driver: %s\n", out.driverName)
+ fmt.Println()
+
+ fmt.Println("Nodes:")
+ fmt.Printf("Name: %s\n", out.Nodes[0].Name)
+ fmt.Printf("Endpoint: %s\n", out.Nodes[0].Endpoint)
+ fmt.Printf("Status: %s\n", out.Nodes[0].Status)
+ fmt.Printf("Buildkit version: %s\n", out.Nodes[0].BuildkitVersion)
+
+ fmt.Printf("Platforms: %s\n", strings.Join(out.Nodes[0].Platforms, ", "))
+ fmt.Println("Labels: ")
+ return nil
+}
diff --git a/cmd/podman/images/import.go b/cmd/podman/images/import.go
index 1efbaac3944..1a039726a18 100644
--- a/cmd/podman/images/import.go
+++ b/cmd/podman/images/import.go
@@ -130,7 +130,7 @@ func importCon(cmd *cobra.Command, args []string) error {
}
errFileName := parse.ValidateFileName(source)
- errURL := parse.ValidURL(source)
+ errURL := parse.ValidWebURL(source)
if errURL == nil {
importOpts.SourceIsURL = true
}
diff --git a/cmd/podman/kube/play.go b/cmd/podman/kube/play.go
index 98659bc9215..02b95729800 100644
--- a/cmd/podman/kube/play.go
+++ b/cmd/podman/kube/play.go
@@ -369,7 +369,7 @@ func readerFromArg(fileName string) (*bytes.Reader, error) {
switch {
case fileName == "-": // Read from stdin
reader = os.Stdin
- case parse.ValidURL(fileName) == nil:
+ case parse.ValidWebURL(fileName) == nil:
response, err := http.Get(fileName)
if err != nil {
return nil, err
diff --git a/cmd/podman/machine/init.go b/cmd/podman/machine/init.go
index 75cfa61da48..2b3277d3dae 100644
--- a/cmd/podman/machine/init.go
+++ b/cmd/podman/machine/init.go
@@ -3,6 +3,7 @@
package machine
import (
+ "errors"
"fmt"
"os"
@@ -232,6 +233,14 @@ func initMachine(cmd *cobra.Command, args []string) error {
err = shim.Init(initOpts, provider)
if err != nil {
+ // The installation is partially complete and podman should
+ // exit gracefully with no error and no success message.
+ // Examples:
+ // - a user has chosen to perform their own reboot
+ // - reexec for limited admin operations, returning to parent
+ if errors.Is(err, define.ErrInitRelaunchAttempt) {
+ return nil
+ }
return err
}
diff --git a/cmd/podman/parse/net.go b/cmd/podman/parse/net.go
index 14c954f9e6f..c1dd72aa35f 100644
--- a/cmd/podman/parse/net.go
+++ b/cmd/podman/parse/net.go
@@ -157,14 +157,21 @@ func parseEnvOrLabelFile(envOrLabel map[string]string, filename, configType stri
return scanner.Err()
}
-// ValidURL checks a string urlStr is a url or not
-func ValidURL(urlStr string) error {
- url, err := url.ParseRequestURI(urlStr)
+// ValidWebURL checks whether urlStr is a valid web URL (http or https).
+func ValidWebURL(urlStr string) error {
+ parsedURL, err := url.ParseRequestURI(urlStr)
if err != nil {
- return fmt.Errorf("invalid url %q: %w", urlStr, err)
+ return fmt.Errorf("invalid URL %q: %w", urlStr, err)
}
- if url.Scheme == "" {
- return fmt.Errorf("invalid url %q: missing scheme", urlStr)
+
+ // to be a valid web url, scheme must be either http or https
+ if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" {
+ return fmt.Errorf("invalid URL %q: unsupported scheme %q", urlStr, parsedURL.Scheme)
+ }
+
+	// ensure url contains a host
+ if parsedURL.Host == "" {
+ return fmt.Errorf("invalid URL %q: missing host", urlStr)
}
return nil
}
diff --git a/cmd/podman/parse/net_test.go b/cmd/podman/parse/net_test.go
index f71dc6ba1fe..9dc8abe30f3 100644
--- a/cmd/podman/parse/net_test.go
+++ b/cmd/podman/parse/net_test.go
@@ -158,3 +158,69 @@ func TestGetAllLabelsFile(t *testing.T) {
result, _ := GetAllLabels(fileLabels, Var1)
assert.Equal(t, len(result), 3)
}
+
+func TestValidWebURL(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ wantErr bool
+ }{
+ {
+ name: "Valid HTTP URL",
+ input: "http://example.com",
+ wantErr: false,
+ },
+ {
+ name: "Valid HTTPS URL",
+ input: "https://example.com",
+ wantErr: false,
+ },
+ {
+ name: "Missing scheme",
+ input: "example.com",
+ wantErr: true,
+ },
+ {
+ name: "Unsupported scheme - FTP",
+ input: "ftp://example.com",
+ wantErr: true,
+ },
+ {
+ name: "Missing host",
+ input: "https://",
+ wantErr: true,
+ },
+ {
+ name: "Local file path - Windows style",
+ input: "C:/hello/world",
+ wantErr: true,
+ },
+ {
+ name: "Local file path - Unix style",
+ input: "/usr/local/bin",
+ wantErr: true,
+ },
+ {
+ name: "Invalid URL characters",
+ input: "https://example.com/%%%",
+ wantErr: true,
+ },
+ {
+ name: "Valid URL with port",
+ input: "https://example.com:8080",
+ wantErr: false,
+ },
+ {
+ name: "Valid URL with path",
+ input: "https://example.com/path/to/resource",
+ wantErr: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := ValidWebURL(tt.input)
+ assert.Equal(t, tt.wantErr, err != nil, "ValidWebURL(%q) = %v, wantErr %v", tt.input, err, tt.wantErr)
+ })
+ }
+}
diff --git a/contrib/validatepr/Containerfile b/contrib/validatepr/Containerfile
index 5eba4c8fbe1..dde52ee607f 100644
--- a/contrib/validatepr/Containerfile
+++ b/contrib/validatepr/Containerfile
@@ -3,15 +3,15 @@ FROM registry.fedoraproject.org/fedora:latest
WORKDIR /go/src/github.com/containers/podman
RUN dnf install -y systemd-devel \
- libassuan-devel \
- libseccomp-devel \
- gpgme-devel \
- device-mapper-devel \
+ awk \
btrfs-progs-devel \
+ git \
golang \
+ gpgme-devel \
+ libassuan-devel \
+ libseccomp-devel \
make \
man-db \
- git \
perl-Clone \
perl-FindBin \
- pre-commit && dnf clean all
+ pre-commit && dnf clean all
diff --git a/docs/source/markdown/options/mount.md b/docs/source/markdown/options/mount.md
index 0d298c61b42..ada1fb86704 100644
--- a/docs/source/markdown/options/mount.md
+++ b/docs/source/markdown/options/mount.md
@@ -32,17 +32,28 @@ Options specific to type=**artifact**:
- *title*: If the artifact source contains multiple blobs a title can be set
which is compared against `org.opencontainers.image.title` annotation.
+- *name*: This can be used to override the filename we use inside the container
+ for mounting. On a single blob artifact the name is used as is if *dst* is a
+ directory and otherwise ignored. With a multi blob artifact the name will be
+ used with an index suffix `-x` where x is the layer index in the artifact
+ starting with 0.
+
The *src* argument contains the name of the artifact, which must already exist locally.
The *dst* argument contains the target path, if the path in the container is a
-directory or does not exist the blob title (`org.opencontainers.image.title`
-annotation) will be used as filename and joined to the path. If the annotation
-does not exist the digest will be used as filename instead. This results in all blobs
-of the artifact mounted into the container at the given path.
+directory the blob title (`org.opencontainers.image.title` annotation) will be used as
+filename and joined to the path. If the annotation does not exist the digest will be
+used as filename instead. This results in all blobs of the artifact mounted into the
+container at the given path.
However, if the *dst* path is an existing file in the container, then the blob will be
mounted directly on it. This only works when the artifact contains a single blob
or when either *digest* or *title* are specified.
+If the *dst* path does not already exist in the container then if the artifact contains
+a single blob it behaves like the existing file case and mounts directly to that path.
+If the artifact has more than one blob it works like the existing directory case and
+mounts each blob as a file within the *dst* path.
+
Options specific to type=**volume**:
- *ro*, *readonly*: *true* or *false* (default if unspecified: *false*).
diff --git a/docs/source/markdown/options/network.md b/docs/source/markdown/options/network.md
index d97ba4218ad..eb0d304f4a4 100644
--- a/docs/source/markdown/options/network.md
+++ b/docs/source/markdown/options/network.md
@@ -27,7 +27,7 @@ Valid _mode_ values are:
- **container:**_id_: Reuse another container's network stack.
-- **host**: Do not create a network namespace, the container uses the host's network. Note: The host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.
+- **host**: Use the host's network namespace for the container instead of creating an isolated namespace. Warning: This gives the container full access to abstract Unix domain sockets and to TCP/UDP sockets bound to localhost. Since these mechanisms are often used to prevent access to sensitive system services, isolating them from access by external entities, use of this option may be considered a security vulnerability.
- **ns:**_path_: Path to a network namespace to join.
diff --git a/docs/source/markdown/podman-container-diff.1.md.in b/docs/source/markdown/podman-container-diff.1.md.in
index d66d65644e9..507e801ff45 100644
--- a/docs/source/markdown/podman-container-diff.1.md.in
+++ b/docs/source/markdown/podman-container-diff.1.md.in
@@ -25,7 +25,7 @@ Alter the output into a different format. The only valid format for **podman con
@@option latest
-## EXAMPLE
+## EXAMPLES
```
# podman container diff container1
@@ -44,6 +44,11 @@ $ podman container diff --format json container1 container2
}
```
+```
+$ podman container diff --latest
+C /etc
+```
+
## SEE ALSO
**[podman(1)](podman.1.md)**, **[podman-container(1)](podman-container.1.md)**
diff --git a/docs/source/markdown/podman-container-inspect.1.md.in b/docs/source/markdown/podman-container-inspect.1.md.in
index cda408a15d6..354f07df38b 100644
--- a/docs/source/markdown/podman-container-inspect.1.md.in
+++ b/docs/source/markdown/podman-container-inspect.1.md.in
@@ -76,7 +76,7 @@ Valid placeholders for the Go template are listed below:
In addition to normal output, display the total file size if the type is a container.
-## EXAMPLE
+## EXAMPLES
Inspect the specified container and print its information in json format.
```
diff --git a/docs/source/markdown/podman-container-restore.1.md b/docs/source/markdown/podman-container-restore.1.md
index 2cbb4c46f7a..cc50e2be699 100644
--- a/docs/source/markdown/podman-container-restore.1.md
+++ b/docs/source/markdown/podman-container-restore.1.md
@@ -159,7 +159,7 @@ option is ignored. Defaults to not restoring *containers* with established TCP
connections.\
The default is **false**.
-## EXAMPLE
+## EXAMPLES
Restore the container "mywebserver".
```
# podman container restore mywebserver
diff --git a/docs/source/markdown/podman-diff.1.md.in b/docs/source/markdown/podman-diff.1.md.in
index 694d488a86a..2d7d51abfff 100644
--- a/docs/source/markdown/podman-diff.1.md.in
+++ b/docs/source/markdown/podman-diff.1.md.in
@@ -25,7 +25,7 @@ Alter the output into a different format. The only valid format for **podman di
@@option latest
-## EXAMPLE
+## EXAMPLES
Show container-modified files versus the container's image:
```
diff --git a/docs/source/markdown/podman-exec.1.md.in b/docs/source/markdown/podman-exec.1.md.in
index 35013a9d0bc..ae661a8395e 100644
--- a/docs/source/markdown/podman-exec.1.md.in
+++ b/docs/source/markdown/podman-exec.1.md.in
@@ -88,6 +88,11 @@ Execute command as the specified user in selected container:
$ podman exec --user root ctrID ls
```
+Execute command but do not attach to the exec session leaving the command running in the background:
+```
+$ podman exec -d ctrID find /path/to/search -name yourfile
+```
+
## SEE ALSO
**[podman(1)](podman.1.md)**, **[podman-run(1)](podman-run.1.md)**
diff --git a/docs/source/markdown/podman-farm-create.1.md b/docs/source/markdown/podman-farm-create.1.md
index 4ab7111ddf3..63a6f39c990 100644
--- a/docs/source/markdown/podman-farm-create.1.md
+++ b/docs/source/markdown/podman-farm-create.1.md
@@ -13,16 +13,14 @@ Create a new farm with connections that Podman knows about which were added via
An empty farm can be created without adding any connections to it. Add or remove
connections from a farm via the *podman farm update* command.
-## EXAMPLE
+## EXAMPLES
-
-
-Create the specified farm:
+Create the specified farm with no connections:
```
$ podman farm create farm2
```
-Create multiple farms:
+Create a farm with connections:
```
$ podman farm create farm1 f37 f38
```
diff --git a/docs/source/markdown/podman-image-diff.1.md b/docs/source/markdown/podman-image-diff.1.md
index 73301110040..85e68de74f5 100644
--- a/docs/source/markdown/podman-image-diff.1.md
+++ b/docs/source/markdown/podman-image-diff.1.md
@@ -23,7 +23,7 @@ The output is prefixed with the following symbols:
Alter the output into a different format. The only valid format for **podman image diff** is `json`.
-## EXAMPLE
+## EXAMPLES
Display image differences from images parent layer:
```
diff --git a/docs/source/markdown/podman-image-mount.1.md b/docs/source/markdown/podman-image-mount.1.md
index 7df0eec415e..cd86bca4176 100644
--- a/docs/source/markdown/podman-image-mount.1.md
+++ b/docs/source/markdown/podman-image-mount.1.md
@@ -30,7 +30,7 @@ Mount all images.
Print the mounted images in specified format (json).
-## EXAMPLE
+## EXAMPLES
Mount multiple images. Note: In rootless mode, image mounting works only after executing the podman unshare command to enter the user namespace.
```
diff --git a/docs/source/markdown/podman-image-unmount.1.md b/docs/source/markdown/podman-image-unmount.1.md
index fe20c509b33..30640a77216 100644
--- a/docs/source/markdown/podman-image-unmount.1.md
+++ b/docs/source/markdown/podman-image-unmount.1.md
@@ -46,5 +46,11 @@ Unmount all images:
```
podman image unmount --all
```
+
+Force unmount image with a given ID:
+```
+podman image unmount --force imageID
+```
+
## SEE ALSO
**[podman(1)](podman.1.md)**, **[podman-image-mount(1)](podman-image-mount.1.md)**, **[podman-mount(1)](podman-mount.1.md)**
diff --git a/docs/source/markdown/podman-images.1.md.in b/docs/source/markdown/podman-images.1.md.in
index 6c261071054..2072d847410 100644
--- a/docs/source/markdown/podman-images.1.md.in
+++ b/docs/source/markdown/podman-images.1.md.in
@@ -119,7 +119,7 @@ Lists only the image IDs.
Sort by *created*, *id*, *repository*, *size* or *tag* (default: **created**)
When sorting by *repository* it also sorts by the *tag* as second criteria to provide a stable output.
-## EXAMPLE
+## EXAMPLES
List all non-dangling images in local storage:
```
diff --git a/docs/source/markdown/podman-init.1.md.in b/docs/source/markdown/podman-init.1.md.in
index ec05283e470..f9e599dd1ae 100644
--- a/docs/source/markdown/podman-init.1.md.in
+++ b/docs/source/markdown/podman-init.1.md.in
@@ -24,22 +24,31 @@ Initialize all containers. Containers that have already initialized (including c
@@option latest
-## EXAMPLE
+## EXAMPLES
Initialize specified container with a given ID.
```
-podman init 35480fc9d568
+$ podman init 35480fc9d568
```
Initialize specified container with a given name.
```
-podman init test1
+$ podman init test1
```
Initialize the latest container. (This option is not available with the remote Podman client, including Mac and Windows (excluding WSL2) machines)
```
-podman init --latest
+$ podman init --latest
```
+
+Initialize all containers.
+```
+$ podman init --all
+9d2629dda7b9d4ca35c1fc63fa56592a08b9d5ab988b4301fddf16b623f676cc
+a9b78bcac97e131236930e3fa0be576e95ab89c96a7cb6fb1c821b772db9f623
+9db345273719c14bc254f90ef2df24779193b42d68b1364c0914ca6f76cf5e9c
+```
+
## SEE ALSO
**[podman(1)](podman.1.md)**, **[podman-start(1)](podman-start.1.md)**
diff --git a/docs/source/markdown/podman-inspect.1.md.in b/docs/source/markdown/podman-inspect.1.md.in
index bc3dbcdb1ab..587b6d3e5e3 100644
--- a/docs/source/markdown/podman-inspect.1.md.in
+++ b/docs/source/markdown/podman-inspect.1.md.in
@@ -38,7 +38,7 @@ In addition to normal output, display the total file size if the type is a conta
Return JSON for the specified type. Type can be 'container', 'image', 'volume', 'network', 'pod', or 'all' (default: all)
(Only meaningful when invoked as *podman inspect*)
-## EXAMPLE
+## EXAMPLES
Inspect the fedora image:
```
diff --git a/docs/source/markdown/podman-load.1.md b/docs/source/markdown/podman-load.1.md
index 0d097e0afd6..0b0672f335b 100644
--- a/docs/source/markdown/podman-load.1.md
+++ b/docs/source/markdown/podman-load.1.md
@@ -17,13 +17,6 @@ The local client further supports loading an **oci-dir** or a **docker-dir** as
The **quiet** option suppresses the progress output when set.
Note: `:` is a restricted character and cannot be part of the file name.
-
-**podman [GLOBAL OPTIONS]**
-
-**podman load [GLOBAL OPTIONS]**
-
-**podman load [OPTIONS]**
-
## OPTIONS
#### **--help**, **-h**
diff --git a/docs/source/markdown/podman-machine-init.1.md.in b/docs/source/markdown/podman-machine-init.1.md.in
index a6f8ba06549..11d812ee405 100644
--- a/docs/source/markdown/podman-machine-init.1.md.in
+++ b/docs/source/markdown/podman-machine-init.1.md.in
@@ -199,6 +199,12 @@ Initialize a Podman machine for the specified name pulling the content from the
$ podman machine init myvm
```
+Initialize and start a new Podman machine in one step.
+
+```
+$ podman machine init --now
+```
+
Initialize the default Podman machine pulling the content from the internet defaulting to rootful mode. The default is rootless.
```
$ podman machine init --rootful
diff --git a/docs/source/markdown/podman-machine-reset.1.md b/docs/source/markdown/podman-machine-reset.1.md
index 7bb92d78960..63ed42953bb 100644
--- a/docs/source/markdown/podman-machine-reset.1.md
+++ b/docs/source/markdown/podman-machine-reset.1.md
@@ -40,6 +40,12 @@ Are you sure you want to continue? [y/N] y
$
```
+Force reset without confirmation.
+```
+$ podman machine reset --force
+$
+```
+
## SEE ALSO
**[podman(1)](podman.1.md)**, **[podman-machine(1)](podman-machine.1.md)**
diff --git a/docs/source/markdown/podman-manifest-add.1.md.in b/docs/source/markdown/podman-manifest-add.1.md.in
index db02b00c05e..6f6f511927d 100644
--- a/docs/source/markdown/podman-manifest-add.1.md.in
+++ b/docs/source/markdown/podman-manifest-add.1.md.in
@@ -139,7 +139,7 @@ configuration information.
$ podman manifest add oci-archive:/tmp/myimage
-## EXAMPLE
+## EXAMPLES
Add specified default image from source manifest list to destination manifest list:
```
diff --git a/docs/source/markdown/podman-manifest-annotate.1.md.in b/docs/source/markdown/podman-manifest-annotate.1.md.in
index 38a186aeee0..7f12cdfc105 100644
--- a/docs/source/markdown/podman-manifest-annotate.1.md.in
+++ b/docs/source/markdown/podman-manifest-annotate.1.md.in
@@ -50,7 +50,7 @@ associated with, at most, one subject.
@@option variant.manifest
-## EXAMPLE
+## EXAMPLES
Update arch and variant information to specified manifest list for image:
```
diff --git a/docs/source/markdown/podman-manifest-inspect.1.md.in b/docs/source/markdown/podman-manifest-inspect.1.md.in
index fe0f2e3e971..cbf676eef70 100644
--- a/docs/source/markdown/podman-manifest-inspect.1.md.in
+++ b/docs/source/markdown/podman-manifest-inspect.1.md.in
@@ -9,6 +9,7 @@ podman\-manifest\-inspect - Display a manifest list or image index
## DESCRIPTION
Displays the manifest list or image index stored using the specified image name.
+
## RETURN VALUE
A formatted JSON representation of the manifest list or image index.
diff --git a/docs/source/markdown/podman-manifest-push.1.md.in b/docs/source/markdown/podman-manifest-push.1.md.in
index c755fb02a4a..47ced3c1b6c 100644
--- a/docs/source/markdown/podman-manifest-push.1.md.in
+++ b/docs/source/markdown/podman-manifest-push.1.md.in
@@ -78,7 +78,7 @@ Sign the pushed images with a sigstore signature using a private key at the spec
DESTINATION is the location the container image is pushed to. It supports all transports from `containers-transports(5)`. If no transport is specified, the `docker` (i.e., container registry) transport is used. For remote clients, including Mac and Windows (excluding WSL2) machines, `docker` is the only supported transport.
-## EXAMPLE
+## EXAMPLES
Push manifest list to container registry:
```
diff --git a/docs/source/markdown/podman-manifest-rm.1.md b/docs/source/markdown/podman-manifest-rm.1.md
index 0613db19fa3..ed9125739d4 100644
--- a/docs/source/markdown/podman-manifest-rm.1.md
+++ b/docs/source/markdown/podman-manifest-rm.1.md
@@ -15,17 +15,15 @@ Removes one or more locally stored manifest lists.
If a specified manifest does not exist in the local storage, ignore it and do not throw an error.
-## EXAMPLE
+## EXAMPLES
-podman manifest rm ``
+```
+podman manifest rm listid
+```
-podman manifest rm listid1 listid2
-
-**storage.conf** (`/etc/containers/storage.conf`)
-
-storage.conf is the storage configuration file for all tools using containers/storage
-
-The storage configuration file specifies all of the available container storage options for tools using shared container storage.
+```
+podman manifest rm --ignore listid1 listid2
+```
## SEE ALSO
**[podman(1)](podman.1.md)**, **[podman-manifest(1)](podman-manifest.1.md)**, **[containers-storage.conf(5)](https://github.com/containers/storage/blob/main/docs/containers-storage.conf.5.md)**
diff --git a/docs/source/markdown/podman-mount.1.md.in b/docs/source/markdown/podman-mount.1.md.in
index 48e6ce4c8da..882a6b93eb8 100644
--- a/docs/source/markdown/podman-mount.1.md.in
+++ b/docs/source/markdown/podman-mount.1.md.in
@@ -39,7 +39,7 @@ Print the mounted containers in specified format (json).
Do not truncate the output (default *false*).
-## EXAMPLE
+## EXAMPLES
In rootful mode, Mount specified container.
```
diff --git a/docs/source/markdown/podman-network-connect.1.md b/docs/source/markdown/podman-network-connect.1.md
index b1b22515391..10962d01955 100644
--- a/docs/source/markdown/podman-network-connect.1.md
+++ b/docs/source/markdown/podman-network-connect.1.md
@@ -26,7 +26,7 @@ Set a static ipv6 address for this container on this network.
#### **--mac-address**=*address*
Set a static mac address for this container on this network.
-## EXAMPLE
+## EXAMPLES
Connect specified container to a named network:
```
@@ -43,6 +43,11 @@ Connect specified container to named network with a static ip:
podman network connect --ip 10.89.1.13 test web
```
+Connect specified container to named network with a static mac address:
+```
+podman network connect --mac-address 92:d0:c6:0a:29:33 test web
+```
+
## SEE ALSO
**[podman(1)](podman.1.md)**, **[podman-network(1)](podman-network.1.md)**, **[podman-network-inspect(1)](podman-network-inspect.1.md)**, **[podman-network-disconnect(1)](podman-network-disconnect.1.md)**
diff --git a/docs/source/markdown/podman-network-create.1.md b/docs/source/markdown/podman-network-create.1.md
index 1a7511f12c7..d96319c678a 100644
--- a/docs/source/markdown/podman-network-create.1.md
+++ b/docs/source/markdown/podman-network-create.1.md
@@ -151,7 +151,7 @@ The subnet in CIDR notation. Can be specified multiple times to allocate more th
The argument order of the **--subnet**, **--gateway** and **--ip-range** options must match.
This is useful to set a static ipv4 and ipv6 subnet.
-## EXAMPLE
+## EXAMPLES
Create a network with no options.
```
diff --git a/docs/source/markdown/podman-network-rm.1.md b/docs/source/markdown/podman-network-rm.1.md
index 788eca6ea8c..766021cb09f 100644
--- a/docs/source/markdown/podman-network-rm.1.md
+++ b/docs/source/markdown/podman-network-rm.1.md
@@ -19,18 +19,24 @@ running, the container is stopped and removed.
Seconds to wait before forcibly stopping the running containers that are using the specified network. The --force option must be specified to use the --time option. Use -1 for infinite wait.
-## EXAMPLE
+## EXAMPLES
Delete specified network:
```
# podman network rm podman9
-Deleted: podman9
+podman9
```
Delete specified network and all containers associated with the network:
```
# podman network rm -f fred
-Deleted: fred
+fred
+```
+
+Delete specified network and all containers associated with the network after waiting up to 15 seconds:
+```
+# podman network rm --force --time 15 fred
+fred
```
## Exit Status
diff --git a/docs/source/markdown/podman-pod-pause.1.md b/docs/source/markdown/podman-pod-pause.1.md
index cf238ea482d..74d53493021 100644
--- a/docs/source/markdown/podman-pod-pause.1.md
+++ b/docs/source/markdown/podman-pod-pause.1.md
@@ -19,16 +19,23 @@ Pause all pods.
Instead of providing the pod name or ID, pause the last created pod. (This option is not available with the remote Podman client, including Mac and Windows (excluding WSL2) machines)
-## EXAMPLE
+## EXAMPLES
Pause a pod with a given name:
```
-podman pod pause mywebserverpod
+$ podman pod pause mywebserverpod
```
Pause a pod with a given ID:
```
-podman pod pause 860a4b23
+$ podman pod pause 860a4b23
+```
+
+Pause all pods:
+```
+$ podman pod pause --all
+817973d45404da08f1fe393a13c8eeb0948f4a259d8835f083370b4a63cb0431
+0793d692719c8ef1f983fd29d7568e817c5a8e865e2b3925201a75dce24cfe80
```
## SEE ALSO
diff --git a/docs/source/markdown/podman-pod-prune.1.md b/docs/source/markdown/podman-pod-prune.1.md
index 4361467c400..1ef005166ae 100644
--- a/docs/source/markdown/podman-pod-prune.1.md
+++ b/docs/source/markdown/podman-pod-prune.1.md
@@ -19,6 +19,8 @@ Force removal of all running pods and their containers. The default is false.
Remove all stopped pods and their containers from local storage.
```
$ sudo podman pod prune
+WARNING! This will remove all stopped/exited pods..
+Are you sure you want to continue? [y/N] y
22b8813332948064b6566370088c5e0230eeaf15a58b1c5646859fd9fc364fe7
2afb26869fe5beab979c234afb75c7506063cd4655b1a73557c9d583ff1aebe9
49161ad2a722cf18722f0e17199a9e840703a17d1158cdeda502b6d54080f674
@@ -26,6 +28,15 @@ $ sudo podman pod prune
6bb06573787efb8b0675bc88ebf8361f1a56d3ac7922d1a6436d8f59ffd955f1
```
+Force removal of all running pods and their containers.
+```
+$ sudo podman pod prune --force
+22b8813332948064b6566370088c5e0230eeaf15a58b1c5646859fd9fc364fe7
+2afb26869fe5beab979c234afb75c7506063cd4655b1a73557c9d583ff1aebe9
+49161ad2a722cf18722f0e17199a9e840703a17d1158cdeda502b6d54080f674
+5ca429f37fb83a9f54eea89e3a9102b7780a6e6ae5f132db0672da551d862c4a
+```
+
## SEE ALSO
**[podman(1)](podman.1.md)**, **[podman-pod(1)](podman-pod.1.md)**
diff --git a/docs/source/markdown/podman-pod-stats.1.md.in b/docs/source/markdown/podman-pod-stats.1.md.in
index c9ebde8aebd..943e36c8cd9 100644
--- a/docs/source/markdown/podman-pod-stats.1.md.in
+++ b/docs/source/markdown/podman-pod-stats.1.md.in
@@ -42,7 +42,7 @@ When using a Go template, precede the format with `table` to print headers.
@@option no-stream
-## EXAMPLE
+## EXAMPLES
List statistics about all pods without streaming:
```
diff --git a/docs/source/markdown/podman-ps.1.md b/docs/source/markdown/podman-ps.1.md
index 4689c9860df..7efb37e9b42 100644
--- a/docs/source/markdown/podman-ps.1.md
+++ b/docs/source/markdown/podman-ps.1.md
@@ -213,8 +213,40 @@ fd7b786b5c32 docker.io/library/alpine:latest buildah 2 hours ago storage
f78620804e00 scratch buildah 2 hours ago storage working-container
```
-## ps
-Print a list of containers
+List containers with their associated pods.
+```
+$ podman ps --pod
+CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES POD ID PODNAME
+4089df24d4f3 docker.io/library/nginx:latest nginx 2 minutes ago Up 2 minutes 80/tcp webserver 1234567890ab web-pod
+92f58933c28c docker.io/library/redis:latest redis 3 minutes ago Up 3 minutes 6379/tcp cache 1234567890ab web-pod
+a1b2c3d4e5f6 docker.io/library/centos:latest /bin/bash 1 minute ago Up 1 minute standalone-container
+```
+
+List all containers with pod information, including those not in pods.
+```
+$ podman ps -a --pod
+CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES POD ID PODNAME
+4089df24d4f3 docker.io/library/nginx:latest nginx 2 minutes ago Up 2 minutes 80/tcp webserver 1234567890ab web-pod
+92f58933c28c docker.io/library/redis:latest redis 3 minutes ago Up 3 minutes 6379/tcp cache 1234567890ab web-pod
+69ed779d8ef9f redis:alpine redis 25 hours ago Exited (0) 25 hours ago 6379/tcp old-cache 5678901234cd old-pod
+a1b2c3d4e5f6 docker.io/library/centos:latest /bin/bash 1 minute ago Up 1 minute standalone-container
+```
+
+Filter containers by pod name.
+```
+$ podman ps --filter pod=web-pod
+CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+4089df24d4f3 docker.io/library/nginx:latest nginx 2 minutes ago Up 2 minutes 80/tcp webserver
+92f58933c28c docker.io/library/redis:latest redis 3 minutes ago Up 3 minutes 6379/tcp cache
+```
+
+Use custom format to show container and pod information.
+```
+$ podman ps --format "{{.Names}} is in pod {{.PodName}} ({{.Pod}})"
+webserver is in pod web-pod (1234567890ab)
+cache is in pod web-pod (1234567890ab)
+standalone-container is in pod ()
+```
## SEE ALSO
**[podman(1)](podman.1.md)**, **[buildah(1)](https://github.com/containers/buildah/blob/main/docs/buildah.1.md)**, **[crio(8)](https://github.com/cri-o/cri-o/blob/main/docs/crio.8.md)**
diff --git a/docs/source/markdown/podman-push.1.md.in b/docs/source/markdown/podman-push.1.md.in
index f22aab8ec04..093182f0482 100644
--- a/docs/source/markdown/podman-push.1.md.in
+++ b/docs/source/markdown/podman-push.1.md.in
@@ -102,7 +102,7 @@ Add a sigstore signature at the destination using a private key at the specified
@@option tls-verify
-## EXAMPLE
+## EXAMPLES
Push the specified image to a local directory:
```
diff --git a/docs/source/markdown/podman-rm.1.md.in b/docs/source/markdown/podman-rm.1.md.in
index 3c3e798fce4..b4dfc9f663a 100644
--- a/docs/source/markdown/podman-rm.1.md.in
+++ b/docs/source/markdown/podman-rm.1.md.in
@@ -75,7 +75,7 @@ The --force option must be specified to use the --time option.
Remove anonymous volumes associated with the container. This does not include named volumes
created with **podman volume create**, or the **--volume** option of **podman run** and **podman create**.
-## EXAMPLE
+## EXAMPLES
Remove container with a given name:
```
$ podman rm mywebserver
diff --git a/docs/source/markdown/podman-rmi.1.md b/docs/source/markdown/podman-rmi.1.md
index 4f07531c6fd..76d2b64a3ec 100644
--- a/docs/source/markdown/podman-rmi.1.md
+++ b/docs/source/markdown/podman-rmi.1.md
@@ -32,6 +32,8 @@ If a specified image does not exist in the local storage, ignore it and do not t
This option does not remove dangling parents of the specified image.
+## EXAMPLES
+
Remove an image by its short ID
```
$ podman rmi c0ed59d05ff7
@@ -59,6 +61,11 @@ Error: nothing: image not known
```
+Remove an image but keep any parents of it.
+```
+podman rmi --no-prune d29200bf974d
+Deleted: d29200bf974dbc48dc66c23c4031548531b6b5943e5f25ee7bda232e3b6b27f4
+```
## Exit Status
**0** All specified images removed
diff --git a/docs/source/markdown/podman-save.1.md.in b/docs/source/markdown/podman-save.1.md.in
index 342c39718fe..e5e96299f8a 100644
--- a/docs/source/markdown/podman-save.1.md.in
+++ b/docs/source/markdown/podman-save.1.md.in
@@ -16,12 +16,6 @@ file using the **output** flag. The **quiet** flag suppresses the output when se
To export the containers, use the **podman export**.
Note: `:` is a restricted character and cannot be part of the file name.
-**podman [GLOBAL OPTIONS]**
-
-**podman save [GLOBAL OPTIONS]**
-
-**podman save [OPTIONS] NAME[:TAG]**
-
## OPTIONS
@@option dir-compress
diff --git a/docs/source/markdown/podman-search.1.md.in b/docs/source/markdown/podman-search.1.md.in
index 9c171194d32..f999e6f728f 100644
--- a/docs/source/markdown/podman-search.1.md.in
+++ b/docs/source/markdown/podman-search.1.md.in
@@ -98,7 +98,6 @@ Do not truncate the output (default *false*).
Search for images containing the specified name, returning the first three images from each defined registry.
```
$ podman search --limit 3 fedora
-NAME DESCRIPTION
NAME DESCRIPTION
registry.fedoraproject.org/f29/fedora-toolbox
registry.fedoraproject.org/f30/fedora-toolbox
diff --git a/docs/source/markdown/podman-secret-inspect.1.md b/docs/source/markdown/podman-secret-inspect.1.md
index 50986bcf8cc..300a1b0aa3e 100644
--- a/docs/source/markdown/podman-secret-inspect.1.md
+++ b/docs/source/markdown/podman-secret-inspect.1.md
@@ -38,7 +38,7 @@ Print usage statement.
#### **--pretty**
-Print inspect output in human-readable format
+Print inspect output in human-readable format. Ignores fields from **--format**.
#### **--showsecret**
@@ -51,6 +51,12 @@ Inspect the secret mysecret.
$ podman secret inspect mysecret
```
+Inspect the secret mysecret and print it in a human-readable format instead of JSON, with the default fields.
+
+```
+$ podman secret inspect --pretty mysecret
+```
+
Inspect the secret mysecret and display the Name and Labels field.
```
$ podman secret inspect --format "{{.Spec.Name}} {{.Spec.Labels}}" mysecret
diff --git a/docs/source/markdown/podman-secret-rm.1.md b/docs/source/markdown/podman-secret-rm.1.md
index 041f815067e..8dbef60f13d 100644
--- a/docs/source/markdown/podman-secret-rm.1.md
+++ b/docs/source/markdown/podman-secret-rm.1.md
@@ -36,6 +36,13 @@ Remove secrets mysecret1 and mysecret2.
$ podman secret rm mysecret1 mysecret2
```
+Remove all secrets
+```
+$ podman secret rm --all
+3fa78977c813cca1d5b1a4570
+4ee314533b16a47d0d8c6e775
+```
+
## SEE ALSO
**[podman(1)](podman.1.md)**, **[podman-secret(1)](podman-secret.1.md)**
diff --git a/docs/source/markdown/podman-system-connection-remove.1.md b/docs/source/markdown/podman-system-connection-remove.1.md
index ad351e9296a..0155cf5b3b0 100644
--- a/docs/source/markdown/podman-system-connection-remove.1.md
+++ b/docs/source/markdown/podman-system-connection-remove.1.md
@@ -21,6 +21,12 @@ Remove the specified system connection:
```
$ podman system connection remove production
```
+
+Remove all system connections:
+```
+$ podman system connection remove --all
+```
+
## SEE ALSO
**[podman(1)](podman.1.md)**, **[podman-system(1)](podman-system.1.md)**, **[podman-system-connection(1)](podman-system-connection.1.md)**
diff --git a/docs/source/markdown/podman-system-prune.1.md b/docs/source/markdown/podman-system-prune.1.md
index 95099d018c7..c1a1f95abeb 100644
--- a/docs/source/markdown/podman-system-prune.1.md
+++ b/docs/source/markdown/podman-system-prune.1.md
@@ -63,6 +63,69 @@ Print usage statement
Prune volumes currently unused by any container
+## EXAMPLES
+
+Prune all containers, pods, networks and dangling images.
+```
+$ podman system prune
+WARNING! This command removes:
+ - all stopped containers
+ - all networks not used by at least one container
+ - all dangling images
+ - all dangling build cache
+
+Are you sure you want to continue? [y/N] y
+Total reclaimed space: 0B
+```
+
+Prune all containers, pods, and networks that are not in use.
+```
+$ podman system prune --all
+WARNING! This command removes:
+ - all stopped containers
+ - all networks not used by at least one container
+ - all images without at least one container associated with them
+ - all build cache
+
+Are you sure you want to continue? [y/N] y
+Deleted Images
+ce21f047f73644dcb9cd55ad247433fb47ade48ad4f4e676881fbcb2d5735c76
+c874afc1f445044eaa821da0c32328707406e546a688d8c4c22587a5f88a1992
+2fce09cfad57c6de112654eeb6f6da1851f3ced1cff7ac0002378642c2c7ca84
+0cd6d2e072175191823999cd189f8d262ba5e460271095570d8cffb1d9072e9a
+172bdaffe628cc7b7f8b7b6695438afc612a9833382f5968a06740a3804c3b64
+bf07fec943ec23054f3b81c0e65926a1c83dc82c50933dc6372c60e09fdb2d4f
+b2f735cbb571dd6a28e66455af0623ecc81f9c5b74259d3e04b3bac3b178e965
+cea2ff433c610f5363017404ce989632e12b953114fefc6f597a58e813c15d61
+Deleted Networks
+podman-default-kube-network
+Total reclaimed space: 3.372GB
+```
+
+Prune all containers, build containers, pods, networks and dangling images.
+```
+$ podman system prune --build
+WARNING! This command removes:
+ - all stopped containers
+ - all networks not used by at least one container
+ - all build containers
+ - all dangling images
+ - all dangling build cache
+
+Are you sure you want to continue? [y/N] y
+Deleted Containers
+a8bfed41990114767c933d27bf5508b01cdc0f641dc36037b349648347c6ea64
+Deleted Images
+055733a33e7a78efa27d3c682df97a9e0489133bef071745144c8d0edda2d708
+Total reclaimed space: 1.4GB
+```
+
+Prune without a confirmation prompt by using the `--force` flag.
+```
+$ podman system prune --force
+Total reclaimed space: 0B
+```
+
## SEE ALSO
**[podman(1)](podman.1.md)**, **[podman-system(1)](podman-system.1.md)**
diff --git a/docs/source/markdown/podman-system-reset.1.md b/docs/source/markdown/podman-system-reset.1.md
index bbc80fe5c7f..537a15d67c4 100644
--- a/docs/source/markdown/podman-system-reset.1.md
+++ b/docs/source/markdown/podman-system-reset.1.md
@@ -48,6 +48,11 @@ WARNING! This will remove:
Are you sure you want to continue? [y/N] y
```
+Force reset all storage back to a clean initialized state.
+```
+$ podman system reset --force
+```
+
### Switching rootless user from VFS driver to overlay with fuse-overlayfs
If the user ran rootless containers without having the `fuse-overlayfs` program
diff --git a/docs/source/markdown/podman-unmount.1.md.in b/docs/source/markdown/podman-unmount.1.md.in
index 8b8653c7f87..a7415cea14d 100644
--- a/docs/source/markdown/podman-unmount.1.md.in
+++ b/docs/source/markdown/podman-unmount.1.md.in
@@ -57,5 +57,10 @@ Unmount all containers:
podman unmount --all
```
+Force unmount the container with the given ID:
+```
+podman umount --force containerID
+```
+
## SEE ALSO
**[podman(1)](podman.1.md)**, **[podman-mount(1)](podman-mount.1.md)**, **[podman-image-mount(1)](podman-image-mount.1.md)**
diff --git a/docs/source/markdown/podman-update.1.md.in b/docs/source/markdown/podman-update.1.md.in
index 68b713f7609..4bfced600fb 100644
--- a/docs/source/markdown/podman-update.1.md.in
+++ b/docs/source/markdown/podman-update.1.md.in
@@ -95,21 +95,21 @@ Changing this setting resets the timer, depending on the state of the container.
@@option unsetenv.update
-## EXAMPLEs
+## EXAMPLES
-Update a container with a new cpu quota and period.
+Update a container with a new cpu quota and period:
```
-podman update --cpus=5 myCtr
+podman update --cpus=0.5 ctrID
```
-Update a container with all available options for cgroups v2.
+Update a container with multiple options at once:
```
-podman update --cpus 5 --cpuset-cpus 0 --cpu-shares 123 --cpuset-mems 0 --memory 1G --memory-swap 2G --memory-reservation 2G --blkio-weight-device /dev/zero:123 --blkio-weight 123 --device-read-bps /dev/zero:10mb --device-write-bps /dev/zero:10mb --device-read-iops /dev/zero:1000 --device-write-iops /dev/zero:1000 --pids-limit 123 ctrID
-```
-
-Update a container with all available options for cgroups v1.
-```
-podman update --cpus 5 --cpuset-cpus 0 --cpu-shares 123 --cpuset-mems 0 --memory 1G --memory-swap 2G --memory-reservation 2G --memory-swappiness 50 --pids-limit 123 ctrID
+podman update --cpus 5 --cpuset-cpus 0 --cpu-shares 123 --cpuset-mems 0 \\
+ --memory 1G --memory-swap 2G --memory-reservation 2G \\
+ --blkio-weight-device /dev/sda:123 --blkio-weight 123 \\
+ --device-read-bps /dev/sda:10mb --device-write-bps /dev/sda:10mb \\
+ --device-read-iops /dev/sda:1000 --device-write-iops /dev/sda:1000 \\
+ --pids-limit 123 ctrID
```
## SEE ALSO
diff --git a/docs/source/markdown/podman-volume-create.1.md b/docs/source/markdown/podman-volume-create.1.md
index 67b97aa7b61..cf9bc8e2986 100644
--- a/docs/source/markdown/podman-volume-create.1.md
+++ b/docs/source/markdown/podman-volume-create.1.md
@@ -69,6 +69,34 @@ This option is mandatory when using the **image** driver.
When not using the **local** and **image** drivers, the given options are passed directly to the volume plugin. In this case, supported options are dictated by the plugin in question, not Podman.
+## QUOTAS
+
+`podman volume create` uses `XFS project quota controls` for controlling the size and the number of inodes of builtin volumes. The directory used to store the volumes must be an `XFS` file system and be mounted with the `pquota` option.
+
+Example /etc/fstab entry:
+```
+/dev/podman/podman-var /var xfs defaults,x-systemd.device-timeout=0,pquota 1 2
+```
+
+Podman generates project IDs for each builtin volume, but these project IDs need to be unique for the XFS file system. These project IDs by default are generated randomly, with a potential for overlap with other quotas on the same file
+system.
+
+The xfs_quota tool can be used to assign a project ID to the storage driver directory, e.g.:
+
+```
+echo 100000:/var/lib/containers/storage/overlay >> /etc/projects
+echo 200000:/var/lib/containers/storage/volumes >> /etc/projects
+echo storage:100000 >> /etc/projid
+echo volumes:200000 >> /etc/projid
+xfs_quota -x -c 'project -s storage volumes' /
+```
+
+In the example above we are configuring the overlay storage driver for newly
+created containers as well as volumes to use project IDs with a **start offset**.
+All containers are assigned larger project IDs (e.g. >= 100000).
+All volumes are assigned larger project IDs starting at 200000.
+This prevents xfs_quota management conflicts with containers/storage.
+
## EXAMPLES
Create empty volume.
@@ -101,34 +129,6 @@ Create image named volume using the specified local image in containers/storage.
# podman volume create --driver image --opt image=fedora:latest fedoraVol
```
-## QUOTAS
-
-`podman volume create` uses `XFS project quota controls` for controlling the size and the number of inodes of builtin volumes. The directory used to store the volumes must be an `XFS` file system and be mounted with the `pquota` option.
-
-Example /etc/fstab entry:
-```
-/dev/podman/podman-var /var xfs defaults,x-systemd.device-timeout=0,pquota 1 2
-```
-
-Podman generates project IDs for each builtin volume, but these project IDs need to be unique for the XFS file system. These project IDs by default are generated randomly, with a potential for overlap with other quotas on the same file
-system.
-
-The xfs_quota tool can be used to assign a project ID to the storage driver directory, e.g.:
-
-```
-echo 100000:/var/lib/containers/storage/overlay >> /etc/projects
-echo 200000:/var/lib/containers/storage/volumes >> /etc/projects
-echo storage:100000 >> /etc/projid
-echo volumes:200000 >> /etc/projid
-xfs_quota -x -c 'project -s storage volumes' /
-```
-
-In the example above we are configuring the overlay storage driver for newly
-created containers as well as volumes to use project IDs with a **start offset**.
-All containers are assigned larger project IDs (e.g. >= 100000).
-All volume assigned project IDs larger project IDs starting with 200000.
-This prevents xfs_quota management conflicts with containers/storage.
-
## MOUNT EXAMPLES
`podman volume create` allows the `type`, `device`, and `o` options to be passed to `mount(8)` when using the `local` driver.
diff --git a/docs/source/markdown/podmansh.1.md b/docs/source/markdown/podmansh.1.md
index 4bd49aaacf0..a4ca55548c9 100644
--- a/docs/source/markdown/podmansh.1.md
+++ b/docs/source/markdown/podmansh.1.md
@@ -46,7 +46,7 @@ After=local-fs.target
[Container]
Image=registry.fedoraproject.org/fedora
ContainerName=podmansh
-RemapUsers=keep-id
+UserNS=keep-id
RunInit=yes
DropCapability=all
NoNewPrivileges=true
@@ -74,7 +74,7 @@ After=local-fs.target
[Container]
Image=registry.fedoraproject.org/fedora
ContainerName=podmansh
-RemapUsers=keep-id
+UserNS=keep-id
RunInit=yes
Volume=%h/data:%h:Z
@@ -104,7 +104,7 @@ After=local-fs.target
[Container]
Image=registry.fedoraproject.org/fedora
ContainerName=podmansh
-RemapUsers=keep-id
+UserNS=keep-id
RunInit=yes
PodmanArgs=--security-opt=unmask=/sys/fs/selinux \
--security-opt=label=nested \
diff --git a/go.mod b/go.mod
index 3aa349e7452..0ee4a280b15 100644
--- a/go.mod
+++ b/go.mod
@@ -12,11 +12,11 @@ require (
github.com/checkpoint-restore/checkpointctl v1.3.0
github.com/checkpoint-restore/go-criu/v7 v7.2.0
github.com/containernetworking/plugins v1.7.1
- github.com/containers/buildah v1.40.1-0.20250523151639-b535d02d0ee1
- github.com/containers/common v0.63.1-0.20250602154905-5a4ca2d5d355
+ github.com/containers/buildah v1.40.1-0.20250604193037-b8d8cc375f30
+ github.com/containers/common v0.63.2-0.20250604184922-bb2062b6265c
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/gvisor-tap-vsock v0.8.6
- github.com/containers/image/v5 v5.35.1-0.20250526152843-c64593da00e4
+ github.com/containers/image/v5 v5.35.1-0.20250603145948-347a6e7283ef
github.com/containers/libhvee v0.10.0
github.com/containers/ocicrypt v1.2.1
github.com/containers/psgo v1.9.0
@@ -27,7 +27,7 @@ require (
github.com/cyphar/filepath-securejoin v0.4.1
github.com/digitalocean/go-qemu v0.0.0-20250212194115-ee9b0668d242
github.com/docker/distribution v2.8.3+incompatible
- github.com/docker/docker v28.1.1+incompatible
+ github.com/docker/docker v28.2.2+incompatible
github.com/docker/go-connections v0.5.0
github.com/docker/go-plugins-helpers v0.0.0-20240701071450-45e2431495c8
github.com/docker/go-units v0.5.0
@@ -46,6 +46,7 @@ require (
github.com/mattn/go-shellwords v1.0.12
github.com/mattn/go-sqlite3 v1.14.28
github.com/mdlayher/vsock v1.2.1
+ github.com/moby/docker-image-spec v1.3.1
github.com/moby/sys/capability v0.4.0
github.com/moby/sys/user v0.4.0
github.com/moby/term v0.5.2
@@ -58,22 +59,22 @@ require (
github.com/opencontainers/runtime-spec v1.2.1
github.com/opencontainers/runtime-tools v0.9.1-0.20250523060157-0ea5ed0382a2
github.com/opencontainers/selinux v1.12.0
- github.com/openshift/imagebuilder v1.2.16-0.20250224193648-e87e4e105fd8
+ github.com/openshift/imagebuilder v1.2.16
github.com/rootless-containers/rootlesskit/v2 v2.3.5
github.com/shirou/gopsutil/v4 v4.25.5
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.9.1
github.com/spf13/pflag v1.0.6
github.com/stretchr/testify v1.10.0
- github.com/vbauerster/mpb/v8 v8.10.1
+ github.com/vbauerster/mpb/v8 v8.10.2
github.com/vishvananda/netlink v1.3.1
- go.etcd.io/bbolt v1.4.0
- golang.org/x/crypto v0.38.0
- golang.org/x/net v0.40.0
- golang.org/x/sync v0.14.0
+ go.etcd.io/bbolt v1.4.1
+ golang.org/x/crypto v0.39.0
+ golang.org/x/net v0.41.0
+ golang.org/x/sync v0.15.0
golang.org/x/sys v0.33.0
golang.org/x/term v0.32.0
- golang.org/x/text v0.25.0
+ golang.org/x/text v0.26.0
google.golang.org/protobuf v1.36.6
gopkg.in/inf.v0 v0.9.1
gopkg.in/yaml.v3 v3.0.1
@@ -113,7 +114,7 @@ require (
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/fsouza/go-dockerclient v1.12.0 // indirect
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
- github.com/go-logr/logr v1.4.2 // indirect
+ github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-openapi/analysis v0.23.0 // indirect
@@ -123,10 +124,11 @@ require (
github.com/go-openapi/loads v0.22.0 // indirect
github.com/go-openapi/runtime v0.28.0 // indirect
github.com/go-openapi/spec v0.21.0 // indirect
- github.com/go-openapi/strfmt v0.23.0 // indirect
+ github.com/go-openapi/strfmt v0.23.1-0.20250509134642-64a09ef0e084 // indirect
github.com/go-openapi/swag v0.23.1 // indirect
github.com/go-openapi/validate v0.24.0 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
+ github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
github.com/golang/protobuf v1.5.4 // indirect
@@ -150,9 +152,7 @@ require (
github.com/mdlayher/socket v0.5.1 // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
- github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/buildkit v0.22.0 // indirect
- github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/go-archive v0.1.0 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/sys/mountinfo v0.7.2 // indirect
@@ -164,7 +164,6 @@ require (
github.com/oklog/ulid v1.3.1 // indirect
github.com/opencontainers/runc v1.3.0 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
- github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pkg/sftp v1.13.9 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
@@ -201,10 +200,10 @@ require (
go.opentelemetry.io/otel/metric v1.34.0 // indirect
go.opentelemetry.io/otel/trace v1.34.0 // indirect
go.uber.org/automaxprocs v1.6.0 // indirect
- golang.org/x/mod v0.24.0 // indirect
+ golang.org/x/mod v0.25.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/time v0.11.0 // indirect
- golang.org/x/tools v0.32.0 // indirect
+ golang.org/x/tools v0.33.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 // indirect
google.golang.org/grpc v1.71.0 // indirect
diff --git a/go.sum b/go.sum
index 641a3bd5f0f..7aa616fc8d6 100644
--- a/go.sum
+++ b/go.sum
@@ -1,8 +1,6 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
-github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
-github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
@@ -66,16 +64,16 @@ github.com/containernetworking/cni v1.3.0 h1:v6EpN8RznAZj9765HhXQrtXgX+ECGebEYEm
github.com/containernetworking/cni v1.3.0/go.mod h1:Bs8glZjjFfGPHMw6hQu82RUgEPNGEaBb9KS5KtNMnJ4=
github.com/containernetworking/plugins v1.7.1 h1:CNAR0jviDj6FS5Vg85NTgKWLDzZPfi/lj+VJfhMDTIs=
github.com/containernetworking/plugins v1.7.1/go.mod h1:xuMdjuio+a1oVQsHKjr/mgzuZ24leAsqUYRnzGoXHy0=
-github.com/containers/buildah v1.40.1-0.20250523151639-b535d02d0ee1 h1:3bNWDmqh9tx0iAXPzBJugj/oC0nTD9yTXCyIu/Mj/LE=
-github.com/containers/buildah v1.40.1-0.20250523151639-b535d02d0ee1/go.mod h1:8BVLrM6nRl/dRMYxZ+TrmoWPXzkCY99rZOYvJoXpIyE=
-github.com/containers/common v0.63.1-0.20250602154905-5a4ca2d5d355 h1:vK7TVpONcQzWHR4dAEnLkLeCrKNB61UhLDpwAXFIIto=
-github.com/containers/common v0.63.1-0.20250602154905-5a4ca2d5d355/go.mod h1:efNRNweihnq5nXALnAPDXTpC7uJtnFV4pNuETTfvI8s=
+github.com/containers/buildah v1.40.1-0.20250604193037-b8d8cc375f30 h1:kCt0fnVBvXY9J98pUDeUc0gHKrhRwaBTWWD3otLutCE=
+github.com/containers/buildah v1.40.1-0.20250604193037-b8d8cc375f30/go.mod h1:QDecwvjrr+e0VD5GYv2dw7tsiqrz673r8B4rIYFP11Y=
+github.com/containers/common v0.63.2-0.20250604184922-bb2062b6265c h1:j4epZCkQt8Jdpz2GsUzvqY4MfaOfJamrNpZnmbV84Ug=
+github.com/containers/common v0.63.2-0.20250604184922-bb2062b6265c/go.mod h1:efNRNweihnq5nXALnAPDXTpC7uJtnFV4pNuETTfvI8s=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/gvisor-tap-vsock v0.8.6 h1:9SeAXK+K2o36CtrgYk6zRXbU3zrayjvkrI8b7/O6u5A=
github.com/containers/gvisor-tap-vsock v0.8.6/go.mod h1:+0mtKmm4STeSDnZe+DGnIwN4EH2f7AcWir7PwT28Ti0=
-github.com/containers/image/v5 v5.35.1-0.20250526152843-c64593da00e4 h1:7rvPvBNGjNfgjAmRZhlxA7ooBbLalqTTGoHa9DBVnBY=
-github.com/containers/image/v5 v5.35.1-0.20250526152843-c64593da00e4/go.mod h1:JAywiXYidI9NBfCvggVF80nYVAsYrNSRpvHKnalbZG0=
+github.com/containers/image/v5 v5.35.1-0.20250603145948-347a6e7283ef h1:sXXyXq3r6nJtwAPx+vnzhakShOM1KJBUpT5e/tZ3zto=
+github.com/containers/image/v5 v5.35.1-0.20250603145948-347a6e7283ef/go.mod h1:tOeAv2LI5fS7gsLlBMhIx46WeiBvvBOwjM4kadtziGQ=
github.com/containers/libhvee v0.10.0 h1:7VLv8keWZpHuGmWvyY4c1mVH5V1JYb1G78VC+8AlrM0=
github.com/containers/libhvee v0.10.0/go.mod h1:at0h8lRcK5jCKfQgU/e6Io0Mw12F36zRLjXVOXRoDTM=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
@@ -117,12 +115,12 @@ github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWh
github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k=
-github.com/docker/cli v28.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v28.2.2+incompatible h1:qzx5BNUDFqlvyq4AHzdNB7gSyVTmU4cgsyN9SdInc1A=
+github.com/docker/cli v28.2.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v28.1.1+incompatible h1:49M11BFLsVO1gxY9UX9p/zwkE/rswggs8AdFmXQw51I=
-github.com/docker/docker v28.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v28.2.2+incompatible h1:CjwRSksz8Yo4+RmQ339Dp/D2tGO5JxwYeqtMOEe0LDw=
+github.com/docker/docker v28.2.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8=
github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
@@ -153,8 +151,8 @@ github.com/fsouza/go-dockerclient v1.12.0/go.mod h1:YWUtjg8japrqD/80L98nTtCoxQFp
github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
@@ -174,8 +172,8 @@ github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsF
github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc=
github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY=
github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
-github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=
-github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
+github.com/go-openapi/strfmt v0.23.1-0.20250509134642-64a09ef0e084 h1:PNIpnlKt8VYiQuxzI48nNqM3M0ZW+PPBMv/LTEQlNDo=
+github.com/go-openapi/strfmt v0.23.1-0.20250509134642-64a09ef0e084/go.mod h1:WHBPDONkZMEwENrJXFU37tIde3N8Q1lrlHSlXbF49LE=
github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
@@ -186,6 +184,8 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U=
github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
+github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
+github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.1.1-0.20241109141217-c266b19b28e9 h1:Kzr9J0S0V2PRxiX6B6xw1kWjzsIyjLO2Ibi4fNTaYBM=
github.com/godbus/dbus/v5 v5.1.1-0.20241109141217-c266b19b28e9/go.mod h1:3AAv2+hPq5rdnr5txxxRwiGjPXamgoIHgz9FPBfOp3c=
@@ -315,8 +315,6 @@ github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU=
github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
-github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
-github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/buildkit v0.22.0 h1:aWN06w1YGSVN1XfeZbj2ZbgY+zi5xDAjEFI8Cy9fTjA=
github.com/moby/buildkit v0.22.0/go.mod h1:j4pP5hxiTWcz7xuTK2cyxQislHl/N2WWHzOy43DlLJw=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
@@ -370,12 +368,10 @@ github.com/opencontainers/runtime-tools v0.9.1-0.20250523060157-0ea5ed0382a2 h1:
github.com/opencontainers/runtime-tools v0.9.1-0.20250523060157-0ea5ed0382a2/go.mod h1:MXdPzqAA8pHC58USHqNCSjyLnRQ6D+NjbpP+02Z1U/0=
github.com/opencontainers/selinux v1.12.0 h1:6n5JV4Cf+4y0KNXW48TLj5DwfXpvWlxXplUkdTrmPb8=
github.com/opencontainers/selinux v1.12.0/go.mod h1:BTPX+bjVbWGXw7ZZWUbdENt8w0htPSrlgOOysQaU62U=
-github.com/openshift/imagebuilder v1.2.16-0.20250224193648-e87e4e105fd8 h1:iPRNMpzJ4HEtIXseOxdIkgNYlp7HJShtEk7URm1BUSU=
-github.com/openshift/imagebuilder v1.2.16-0.20250224193648-e87e4e105fd8/go.mod h1:gASl6jikVG3bCFnLjG6Ow5TeKwKVvrqUUj8C7EUmqc8=
+github.com/openshift/imagebuilder v1.2.16 h1:Vqjy5uPoVDJiX5JUKHo0Cf440ih5cKI7lVe2ZJ2X+RA=
+github.com/openshift/imagebuilder v1.2.16/go.mod h1:gASl6jikVG3bCFnLjG6Ow5TeKwKVvrqUUj8C7EUmqc8=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
-github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M=
-github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -396,8 +392,8 @@ github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glE
github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
@@ -477,8 +473,8 @@ github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo=
github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
-github.com/vbauerster/mpb/v8 v8.10.1 h1:t/ZFv/NYgoBUy2LrmkD5Vc25r+JhoS4+gRkjVbolO2Y=
-github.com/vbauerster/mpb/v8 v8.10.1/go.mod h1:+Ja4P92E3/CorSZgfDtK46D7AVbDqmBQRTmyTqPElo0=
+github.com/vbauerster/mpb/v8 v8.10.2 h1:2uBykSHAYHekE11YvJhKxYmLATKHAGorZwFlyNw4hHM=
+github.com/vbauerster/mpb/v8 v8.10.2/go.mod h1:+Ja4P92E3/CorSZgfDtK46D7AVbDqmBQRTmyTqPElo0=
github.com/vishvananda/netlink v1.3.1 h1:3AEMt62VKqz90r0tmNhog0r/PpWKmrEShJU0wJW6bV0=
github.com/vishvananda/netlink v1.3.1/go.mod h1:ARtKouGSTGchR8aMwmkzC0qiNPrrWO5JS/XMVl45+b4=
github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY=
@@ -505,8 +501,8 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
-go.etcd.io/bbolt v1.4.0 h1:TU77id3TnN/zKr7CO/uk+fBCwF2jGcMuw2B/FMAzYIk=
-go.etcd.io/bbolt v1.4.0/go.mod h1:AsD+OCi/qPN1giOX1aiLAha3o1U8rAz65bvN4j0sRuk=
+go.etcd.io/bbolt v1.4.1 h1:5mOV+HWjIPLEAlUGMsveaUvK2+byZMFOzojoi7bh7uI=
+go.etcd.io/bbolt v1.4.1/go.mod h1:c8zu2BnXWTu2XM4XcICtbGSl9cFwsXtcf9zLt2OncM8=
go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeHxQ=
go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
@@ -544,8 +540,8 @@ golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDf
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
-golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
-golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
+golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
+golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
@@ -560,8 +556,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
-golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
+golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
+golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -579,8 +575,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
-golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
-golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
+golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
+golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
@@ -596,8 +592,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
-golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
+golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -646,8 +642,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
-golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
-golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
+golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
+golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -663,8 +659,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
-golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU=
-golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s=
+golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
+golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/hack/libdm_tag.sh b/hack/libdm_tag.sh
deleted file mode 100755
index 815b5d914ed..00000000000
--- a/hack/libdm_tag.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env bash
-tmpdir="$PWD/tmp.$RANDOM"
-mkdir -p "$tmpdir"
-trap 'rm -fr "$tmpdir"' EXIT
-${CC:-cc} ${CFLAGS} ${CPPFLAGS} ${LDFLAGS} -o "$tmpdir"/libdm_tag -x c - -ldevmapper > /dev/null 2> /dev/null << EOF
-#include <libdevmapper.h>
-int main() {
- struct dm_task *task;
- dm_task_deferred_remove(task);
- return 0;
-}
-EOF
-if test $? -ne 0 ; then
- echo libdm_no_deferred_remove
-fi
diff --git a/hack/sqlite_tag.sh b/hack/sqlite_tag.sh
new file mode 100755
index 00000000000..0248355cf37
--- /dev/null
+++ b/hack/sqlite_tag.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+${CPP:-${CC:-cc} -E} ${CPPFLAGS} - &> /dev/null << EOF
+#include <sqlite3.h>
+EOF
+if test $? -eq 0 ; then
+ echo libsqlite3
+fi
diff --git a/libpod/container.go b/libpod/container.go
index ff5839a549b..1f5d2298a6b 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -300,6 +300,11 @@ type ContainerArtifactVolume struct {
// the title annotation exist.
// Optional. Conflicts with Title.
Digest string `json:"digest"`
+ // Name is the name that should be used for the path inside the container. When a single blob
+ // is mounted the name is used as is. If multiple blobs are mounted then mount them as
+ // "-x" where x is a 0 indexed integer based on the layer order.
+ // Optional.
+ Name string `json:"name,omitempty"`
}
// ContainerSecret is a secret that is mounted in a container
diff --git a/libpod/container_internal_common.go b/libpod/container_internal_common.go
index b6196dba7e4..efb6074d726 100644
--- a/libpod/container_internal_common.go
+++ b/libpod/container_internal_common.go
@@ -554,19 +554,31 @@ func (c *Container) generateSpec(ctx context.Context) (s *spec.Spec, cleanupFunc
return nil, nil, err
}
- // Ignore the error, destIsFile will return false with errors so if the file does not exist
- // we treat it as dir, the oci runtime will always create the target bind mount path.
- destIsFile, _ := containerPathIsFile(c.state.Mountpoint, artifactMount.Dest)
+ destIsFile, err := containerPathIsFile(c.state.Mountpoint, artifactMount.Dest)
+ // When the file does not exists and the artifact has only a single blob to mount
+ // assume it is a file so we use the dest path as direct mount.
+ if err != nil && len(paths) == 1 && errors.Is(err, fs.ErrNotExist) {
+ destIsFile = true
+ }
if destIsFile && len(paths) > 1 {
return nil, nil, fmt.Errorf("artifact %q contains more than one blob and container path %q is a file", artifactMount.Source, artifactMount.Dest)
}
- for _, path := range paths {
+ for i, path := range paths {
var dest string
if destIsFile {
dest = artifactMount.Dest
} else {
- dest = filepath.Join(artifactMount.Dest, path.Name)
+ var filename string
+ if artifactMount.Name != "" {
+ filename = artifactMount.Name
+ if len(paths) > 1 {
+ filename += "-" + strconv.Itoa(i)
+ }
+ } else {
+ filename = path.Name
+ }
+ dest = filepath.Join(artifactMount.Dest, filename)
}
logrus.Debugf("Mounting artifact %q in container %s, mount blob %q to %q", artifactMount.Source, c.ID(), path.SourcePath, dest)
diff --git a/libpod/define/info.go b/libpod/define/info.go
index bc37d813967..967a664c99b 100644
--- a/libpod/define/info.go
+++ b/libpod/define/info.go
@@ -66,6 +66,8 @@ type HostInfo struct {
Uptime string `json:"uptime"`
Variant string `json:"variant"`
Linkmode string `json:"linkmode"`
+
+ EmulatedArchitectures []string `json:"emulatedArchitectures,omitempty"`
}
// RemoteSocket describes information about the API socket
diff --git a/libpod/events/logfile.go b/libpod/events/logfile.go
index 009f6ea5d0b..97d9ef875bc 100644
--- a/libpod/events/logfile.go
+++ b/libpod/events/logfile.go
@@ -34,7 +34,7 @@ func newLogFileEventer(options EventerOptions) (*EventLogFile, error) {
}
// We have to make sure the file is created otherwise reading events will hang.
// https://github.com/containers/podman/issues/15688
- fd, err := os.OpenFile(options.LogFilePath, os.O_RDONLY|os.O_CREATE, 0700)
+ fd, err := os.OpenFile(options.LogFilePath, os.O_RDONLY|os.O_CREATE, 0600)
if err != nil {
return nil, fmt.Errorf("failed to create event log file: %w", err)
}
@@ -68,6 +68,7 @@ func (e EventLogFile) writeString(s string) error {
if err != nil {
return err
}
+ defer f.Close()
return writeToFile(s, f)
}
@@ -168,7 +169,7 @@ func (e EventLogFile) Read(ctx context.Context, options ReadOptions) error {
event, err := newEventFromJSONString(line.Text)
if err != nil {
- err := fmt.Errorf("event type %s is not valid in %s", event.Type.String(), e.options.LogFilePath)
+ err := fmt.Errorf("event type is not valid in %s", e.options.LogFilePath)
options.EventChannel <- ReadResult{Error: err}
continue
}
diff --git a/pkg/api/handlers/compat/images.go b/pkg/api/handlers/compat/images.go
index 2462936728b..45b6c042c30 100644
--- a/pkg/api/handlers/compat/images.go
+++ b/pkg/api/handlers/compat/images.go
@@ -28,8 +28,9 @@ import (
dockerContainer "github.com/docker/docker/api/types/container"
dockerImage "github.com/docker/docker/api/types/image"
dockerStorage "github.com/docker/docker/api/types/storage"
- "github.com/docker/go-connections/nat"
+ dockerSpec "github.com/moby/docker-image-spec/specs-go/v1"
"github.com/opencontainers/go-digest"
+ imageSpec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
@@ -353,22 +354,20 @@ func imageDataToImageInspect(ctx context.Context, l *libimage.Image) (*handlers.
if err != nil {
return nil, err
}
- ports, err := portsToPortSet(info.Config.ExposedPorts)
- if err != nil {
- return nil, err
- }
// TODO: many fields in Config still need wiring
- config := dockerContainer.Config{
- User: info.User,
- ExposedPorts: ports,
- Env: info.Config.Env,
- Cmd: info.Config.Cmd,
- Volumes: info.Config.Volumes,
- WorkingDir: info.Config.WorkingDir,
- Entrypoint: info.Config.Entrypoint,
- Labels: info.Labels,
- StopSignal: info.Config.StopSignal,
+ config := dockerSpec.DockerOCIImageConfig{
+ ImageConfig: imageSpec.ImageConfig{
+ User: info.User,
+ ExposedPorts: info.Config.ExposedPorts,
+ Env: info.Config.Env,
+ Cmd: info.Config.Cmd,
+ Volumes: info.Config.Volumes,
+ WorkingDir: info.Config.WorkingDir,
+ Entrypoint: info.Config.Entrypoint,
+ Labels: info.Labels,
+ StopSignal: info.Config.StopSignal,
+ },
}
rootfs := dockerImage.RootFS{}
@@ -413,33 +412,6 @@ func imageDataToImageInspect(ctx context.Context, l *libimage.Image) (*handlers.
return &handlers.ImageInspect{InspectResponse: dockerImageInspect}, nil
}
-// portsToPortSet converts libpod's exposed ports to docker's structs
-func portsToPortSet(input map[string]struct{}) (nat.PortSet, error) {
- ports := make(nat.PortSet)
- for k := range input {
- proto, port := nat.SplitProtoPort(k)
- switch proto {
- // See the OCI image spec for details:
- // https://github.com/opencontainers/image-spec/blob/e562b04403929d582d449ae5386ff79dd7961a11/config.md#properties
- case "tcp", "":
- p, err := nat.NewPort("tcp", port)
- if err != nil {
- return nil, fmt.Errorf("unable to create tcp port from %s: %w", k, err)
- }
- ports[p] = struct{}{}
- case "udp":
- p, err := nat.NewPort("udp", port)
- if err != nil {
- return nil, fmt.Errorf("unable to create tcp port from %s: %w", k, err)
- }
- ports[p] = struct{}{}
- default:
- return nil, fmt.Errorf("invalid port proto %q in %q", proto, k)
- }
- }
- return ports, nil
-}
-
func GetImages(w http.ResponseWriter, r *http.Request) {
decoder := utils.GetDecoder(r)
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
diff --git a/pkg/api/handlers/compat/info.go b/pkg/api/handlers/compat/info.go
index b4c82910f4f..c6dc7e2c666 100644
--- a/pkg/api/handlers/compat/info.go
+++ b/pkg/api/handlers/compat/info.go
@@ -56,8 +56,6 @@ func GetInfo(w http.ResponseWriter, r *http.Request) {
info := &handlers.Info{
Info: dockerSystem.Info{
Architecture: goRuntime.GOARCH,
- BridgeNfIP6tables: !sysInfo.BridgeNFCallIP6TablesDisabled,
- BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled,
CPUCfsPeriod: sysInfo.CPUCfsPeriod,
CPUCfsQuota: sysInfo.CPUCfsQuota,
CPUSet: sysInfo.Cpuset,
diff --git a/pkg/api/handlers/compat/system.go b/pkg/api/handlers/compat/system.go
index a8b42e57360..ac926d6a685 100644
--- a/pkg/api/handlers/compat/system.go
+++ b/pkg/api/handlers/compat/system.go
@@ -13,6 +13,7 @@ import (
"github.com/containers/podman/v5/pkg/domain/entities"
"github.com/containers/podman/v5/pkg/domain/infra/abi"
docker "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/build"
"github.com/docker/docker/api/types/container"
dockerImage "github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/volume"
@@ -94,7 +95,7 @@ func GetDiskUsage(w http.ResponseWriter, r *http.Request) {
Images: imgs,
Containers: ctnrs,
Volumes: vols,
- BuildCache: []*docker.BuildCache{},
+ BuildCache: []*build.CacheRecord{},
BuilderSize: 0,
}})
}
diff --git a/pkg/domain/infra/abi/system.go b/pkg/domain/infra/abi/system.go
index fd7c32eea96..204c851edc7 100644
--- a/pkg/domain/infra/abi/system.go
+++ b/pkg/domain/infra/abi/system.go
@@ -15,6 +15,7 @@ import (
"github.com/containers/podman/v5/libpod/define"
"github.com/containers/podman/v5/pkg/domain/entities"
"github.com/containers/podman/v5/pkg/domain/entities/reports"
+ "github.com/containers/podman/v5/pkg/emulation"
"github.com/containers/podman/v5/pkg/util"
"github.com/containers/storage"
"github.com/containers/storage/pkg/directory"
@@ -27,6 +28,9 @@ func (ic *ContainerEngine) Info(ctx context.Context) (*define.Info, error) {
if err != nil {
return nil, err
}
+
+ info.Host.EmulatedArchitectures = emulation.Registered()
+
info.Host.RemoteSocket = &define.RemoteSocket{Path: ic.Libpod.RemoteURI()}
// `podman system connection add` invokes podman via ssh to fill in connection string. Here
diff --git a/pkg/machine/define/errors.go b/pkg/machine/define/errors.go
index 5b241c7566c..6252e892548 100644
--- a/pkg/machine/define/errors.go
+++ b/pkg/machine/define/errors.go
@@ -8,9 +8,11 @@ import (
)
var (
- ErrWrongState = errors.New("VM in wrong state to perform action")
- ErrVMAlreadyExists = errors.New("VM already exists")
- ErrNotImplemented = errors.New("functionality not implemented")
+ ErrWrongState = errors.New("VM in wrong state to perform action")
+ ErrVMAlreadyExists = errors.New("VM already exists")
+ ErrNotImplemented = errors.New("functionality not implemented")
+ ErrInitRelaunchAttempt = errors.New("stopping execution: 'init' relaunched with --reexec flag to reinitialize the VM")
+ ErrRebootInitiated = errors.New("system reboot initiated")
)
type ErrVMRunningCannotDestroyed struct {
diff --git a/pkg/machine/e2e/init_test.go b/pkg/machine/e2e/init_test.go
index 684e47e9d99..e1bbca40513 100644
--- a/pkg/machine/e2e/init_test.go
+++ b/pkg/machine/e2e/init_test.go
@@ -542,6 +542,7 @@ var _ = Describe("podman machine init", func() {
})
It("machine init with rosetta=true", func() {
+ Skip("rosetta currently hard disabled https://github.com/containers/podman-machine-os/pull/134")
skipIfVmtype(define.QemuVirt, "Test is only for AppleHv")
skipIfVmtype(define.WSLVirt, "Test is only for AppleHv")
skipIfVmtype(define.HyperVVirt, "Test is only for AppleHv")
diff --git a/pkg/machine/ignition/ignition.go b/pkg/machine/ignition/ignition.go
index 9f3fcaf6e49..b381daff202 100644
--- a/pkg/machine/ignition/ignition.go
+++ b/pkg/machine/ignition/ignition.go
@@ -182,46 +182,6 @@ func (ign *DynamicIgnition) GenerateIgnitionConfig() error {
}
}
- // This service gets environment variables that are provided
- // through qemu fw_cfg and then sets them into systemd/system.conf.d,
- // profile.d and environment.d files
- //
- // Currently, it is used for propagating
- // proxy settings e.g. HTTP_PROXY and others, on a start avoiding
- // a need of re-creating/re-initiating a VM
-
- envset := parser.NewUnitFile()
- envset.Add("Unit", "Description", "Environment setter from QEMU FW_CFG")
-
- envset.Add("Service", "Type", "oneshot")
- envset.Add("Service", "RemainAfterExit", "yes")
- envset.Add("Service", "Environment", "FWCFGRAW=/sys/firmware/qemu_fw_cfg/by_name/opt/com.coreos/environment/raw")
- envset.Add("Service", "Environment", "SYSTEMD_CONF=/etc/systemd/system.conf.d/default-env.conf")
- envset.Add("Service", "Environment", "ENVD_CONF=/etc/environment.d/default-env.conf")
- envset.Add("Service", "Environment", "PROFILE_CONF=/etc/profile.d/default-env.sh")
- envset.Add("Service", "ExecStart", `/usr/bin/bash -c '/usr/bin/test -f ${FWCFGRAW} &&\
- echo "[Manager]\n#Got from QEMU FW_CFG\nDefaultEnvironment=$(/usr/bin/base64 -d ${FWCFGRAW} | sed -e "s+|+ +g")\n" > ${SYSTEMD_CONF} ||\
- echo "[Manager]\n#Got nothing from QEMU FW_CFG\n#DefaultEnvironment=\n" > ${SYSTEMD_CONF}'`)
- envset.Add("Service", "ExecStart", `/usr/bin/bash -c '/usr/bin/test -f ${FWCFGRAW} && (\
- echo "#Got from QEMU FW_CFG"> ${ENVD_CONF};\
- IFS="|";\
- for iprxy in $(/usr/bin/base64 -d ${FWCFGRAW}); do\
- echo "$iprxy" >> ${ENVD_CONF}; done ) || \
- echo "#Got nothing from QEMU FW_CFG"> ${ENVD_CONF}'`)
- envset.Add("Service", "ExecStart", `/usr/bin/bash -c '/usr/bin/test -f ${FWCFGRAW} && (\
- echo "#Got from QEMU FW_CFG"> ${PROFILE_CONF};\
- IFS="|";\
- for iprxy in $(/usr/bin/base64 -d ${FWCFGRAW}); do\
- echo "export $iprxy" >> ${PROFILE_CONF}; done ) || \
- echo "#Got nothing from QEMU FW_CFG"> ${PROFILE_CONF}'`)
- envset.Add("Service", "ExecStartPost", "/usr/bin/systemctl daemon-reload")
-
- envset.Add("Install", "WantedBy", "sysinit.target")
- envsetFile, err := envset.ToString()
- if err != nil {
- return err
- }
-
ignSystemd := Systemd{
Units: []Unit{
{
@@ -239,16 +199,6 @@ func (ign *DynamicIgnition) GenerateIgnitionConfig() error {
},
}
- // Only qemu has the qemu firmware environment setting
- if ign.VMType == define.QemuVirt {
- qemuUnit := Unit{
- Enabled: BoolToPtr(true),
- Name: "envset-fwcfg.service",
- Contents: &envsetFile,
- }
- ignSystemd.Units = append(ignSystemd.Units, qemuUnit)
- }
-
// Only AppleHv with Apple Silicon can use Rosetta
if ign.VMType == define.AppleHvVirt && runtime.GOARCH == "arm64" {
rosettaUnit := Systemd{
@@ -681,24 +631,6 @@ func (i *IgnitionBuilder) Build() error {
return i.dynamicIgnition.Write()
}
-func GetNetRecoveryFile() string {
- return `#!/bin/bash
-# Verify network health, and bounce the network device if host connectivity
-# is lost. This is a temporary workaround for a known rare qemu/virtio issue
-# that affects some systems
-
-sleep 120 # allow time for network setup on initial boot
-while true; do
- sleep 30
- curl -s -o /dev/null --max-time 30 http://192.168.127.1/health
- if [ "$?" != "0" ]; then
- echo "bouncing nic due to loss of connectivity with host"
- ifconfig enp0s1 down; ifconfig enp0s1 up
- fi
-done
-`
-}
-
func (i *IgnitionBuilder) AddPlaybook(contents string, destPath string, username string) error {
// create the ignition file object
f := File{
@@ -744,19 +676,6 @@ func (i *IgnitionBuilder) AddPlaybook(contents string, destPath string, username
return nil
}
-func GetNetRecoveryUnitFile() *parser.UnitFile {
- recoveryUnit := parser.NewUnitFile()
- recoveryUnit.Add("Unit", "Description", "Verifies health of network and recovers if necessary")
- recoveryUnit.Add("Unit", "After", "sshd.socket sshd.service")
- recoveryUnit.Add("Service", "ExecStart", "/usr/local/bin/net-health-recovery.sh")
- recoveryUnit.Add("Service", "StandardOutput", "journal")
- recoveryUnit.Add("Service", "StandardError", "journal")
- recoveryUnit.Add("Service", "StandardInput", "null")
- recoveryUnit.Add("Install", "WantedBy", "default.target")
-
- return recoveryUnit
-}
-
func DefaultReadyUnitFile() parser.UnitFile {
u := parser.NewUnitFile()
u.Add("Unit", "After", "sshd.socket sshd.service")
diff --git a/pkg/machine/ocipull/ociartifact.go b/pkg/machine/ocipull/ociartifact.go
index 9dc81c43fb6..1237c3a40c5 100644
--- a/pkg/machine/ocipull/ociartifact.go
+++ b/pkg/machine/ocipull/ociartifact.go
@@ -27,7 +27,6 @@ const (
artifactRegistry = "quay.io"
artifactRepo = "podman"
artifactImageName = "machine-os"
- artifactImageNameWSL = "machine-os-wsl"
artifactOriginalName = "org.opencontainers.image.title"
machineOS = "linux"
)
@@ -95,13 +94,7 @@ func NewOCIArtifactPull(ctx context.Context, dirs *define.MachineDirs, endpoint
cache := false
if endpoint == "" {
- // The OCI artifact containing the OS image for WSL has a different
- // image name. This should be temporary and dropped as soon as the
- // OS image for WSL is built from fedora-coreos too (c.f. RUN-2178).
imageName := artifactImageName
- if vmType == define.WSLVirt {
- imageName = artifactImageNameWSL
- }
endpoint = fmt.Sprintf("docker://%s/%s/%s:%s", artifactRegistry, artifactRepo, imageName, artifactVersion.majorMinor())
cache = true
}
diff --git a/pkg/machine/shim/host.go b/pkg/machine/shim/host.go
index 198d63f76f1..4ebc41cf734 100644
--- a/pkg/machine/shim/host.go
+++ b/pkg/machine/shim/host.go
@@ -100,8 +100,12 @@ func Init(opts machineDefine.InitOptions, mp vmconfigs.VMProvider) error {
if err != nil {
return err
}
- machineLock.Lock()
- defer machineLock.Unlock()
+
+ // If the machine is being re-launched, the lock is already held
+ if !opts.ReExec {
+ machineLock.Lock()
+ defer machineLock.Unlock()
+ }
mc, err := vmconfigs.NewMachineConfig(opts, dirs, sshIdentityPath, mp.VMType(), machineLock)
if err != nil {
@@ -111,8 +115,9 @@ func Init(opts machineDefine.InitOptions, mp vmconfigs.VMProvider) error {
mc.Version = vmconfigs.MachineConfigVersion
createOpts := machineDefine.CreateVMOpts{
- Name: opts.Name,
- Dirs: dirs,
+ Name: opts.Name,
+ Dirs: dirs,
+ ReExec: opts.ReExec,
}
if umn := opts.UserModeNetworking; umn != nil {
@@ -264,6 +269,14 @@ func Init(opts machineDefine.InitOptions, mp vmconfigs.VMProvider) error {
}
ignBuilder.WithUnit(readyUnit)
+ // CreateVM could cause the init command to be re-launched in some cases (e.g. wsl)
+ // so we need to avoid creating the machine config or connections before this check happens.
+ // when relaunching, the invoked 'init' command will be responsible to set up the machine
+ err = mp.CreateVM(createOpts, mc, &ignBuilder)
+ if err != nil {
+ return err
+ }
+
// TODO AddSSHConnectionToPodmanSocket could take an machineconfig instead
if err := connection.AddSSHConnectionsToPodmanSocket(mc.HostUser.UID, mc.SSH.Port, mc.SSH.IdentityPath, mc.Name, mc.SSH.RemoteUsername, opts); err != nil {
return err
@@ -278,11 +291,6 @@ func Init(opts machineDefine.InitOptions, mp vmconfigs.VMProvider) error {
}
callbackFuncs.Add(cleanup)
- err = mp.CreateVM(createOpts, mc, &ignBuilder)
- if err != nil {
- return err
- }
-
if len(opts.IgnitionPath) == 0 {
if err := ignBuilder.Build(); err != nil {
return err
diff --git a/pkg/machine/wsl/declares.go b/pkg/machine/wsl/declares.go
index 0145ae86bfa..aed26c998a9 100644
--- a/pkg/machine/wsl/declares.go
+++ b/pkg/machine/wsl/declares.go
@@ -175,17 +175,6 @@ http://docs.microsoft.com/en-us/windows/wsl/install
`
-const wslKernelError = `Could not %s. See previous output for any potential failure details.
-If you can not resolve the issue, try rerunning the "podman machine init command". If that fails
-try the "wsl --update" command and then rerun "podman machine init". Finally, if all else fails,
-try following the steps outlined in the following article:
-
-http://docs.microsoft.com/en-us/windows/wsl/install
-
-`
-
-const wslInstallKernel = "install the WSL Kernel"
-
const wslOldVersion = `Automatic installation of WSL can not be performed on this version of Windows
Either update to Build 19041 (or later), or perform the manual installation steps
outlined in the following article:
diff --git a/pkg/machine/wsl/machine.go b/pkg/machine/wsl/machine.go
index 2c6ee39ac52..ab8482b1897 100644
--- a/pkg/machine/wsl/machine.go
+++ b/pkg/machine/wsl/machine.go
@@ -13,7 +13,6 @@ import (
"path/filepath"
"strconv"
"strings"
- "time"
"github.com/containers/common/pkg/config"
"github.com/containers/common/pkg/strongunits"
@@ -22,7 +21,6 @@ import (
"github.com/containers/podman/v5/pkg/machine/env"
"github.com/containers/podman/v5/pkg/machine/ignition"
"github.com/containers/podman/v5/pkg/machine/vmconfigs"
- "github.com/containers/podman/v5/pkg/machine/wsl/wutil"
"github.com/containers/podman/v5/utils"
"github.com/containers/storage/pkg/homedir"
"github.com/sirupsen/logrus"
@@ -32,7 +30,8 @@ import (
var (
// vmtype refers to qemu (vs libvirt, krun, etc)
- vmtype = define.WSLVirt
+ vmtype = define.WSLVirt
+ ErrWslNotSupported = errors.New("wsl features not supported or configured correctly")
)
type ExitCodeError struct {
@@ -95,7 +94,26 @@ func provisionWSLDist(name string, imagePath string, prompt string) (string, err
dist := env.WithPodmanPrefix(name)
fmt.Println(prompt)
- if err = runCmdPassThrough("wsl", "--import", dist, distTarget, imagePath, "--version", "2"); err != nil {
+
+ // Run WSL import and analyze output for specific errors.
+ // If the 'Virtual Machine Platform' feature is disabled, we expect a failure
+ // with HCS service-related errors such as:
+ // 1. Wsl/Service/RegisterDistro/CreateVm/HCS/ERROR_NOT_SUPPORTED
+ // 2. Wsl/Service/RegisterDistro/CreateVm/HCS/HCS_E_SERVICE_NOT_AVAILABLE
+ cmdOutput := &bytes.Buffer{}
+ err = runCmdPassThroughTee(cmdOutput, "wsl", "--import", dist, distTarget, imagePath, "--version", "2")
+ decoder := unicode.UTF16(unicode.LittleEndian, unicode.UseBOM).NewDecoder()
+ decoded, _, decodeErr := transform.Bytes(decoder, cmdOutput.Bytes())
+ if decodeErr != nil {
+ return "", fmt.Errorf("failed to decode WSL output: %w", decodeErr)
+ }
+ decodedStr := strings.ToLower(string(decoded))
+ for _, substr := range []string{"hcs/error_not_supported", "hcs/hcs_e_service_not_available"} {
+ if strings.Contains(decodedStr, substr) {
+ return "", ErrWslNotSupported
+ }
+ }
+ if err != nil {
return "", fmt.Errorf("the WSL import of guest OS failed: %w", err)
}
@@ -301,41 +319,6 @@ func writeWslConf(dist string, user string) error {
return nil
}
-func checkAndInstallWSL(reExec bool) (bool, error) {
- if wutil.IsWSLInstalled() {
- return true, nil
- }
-
- admin := HasAdminRights()
-
- if !wutil.IsWSLFeatureEnabled() {
- return false, attemptFeatureInstall(reExec, admin)
- }
-
- skip := false
- if !reExec && !admin {
- fmt.Println("Launching WSL Kernel Install...")
- if err := launchElevate(wslInstallKernel); err != nil {
- return false, err
- }
-
- skip = true
- }
-
- if !skip {
- if err := installWslKernel(); err != nil {
- fmt.Fprintf(os.Stderr, wslKernelError, wslInstallKernel)
- return false, err
- }
-
- if reExec {
- return false, nil
- }
- }
-
- return true, nil
-}
-
func attemptFeatureInstall(reExec, admin bool) error {
if !winVersionAtLeast(10, 0, 18362) {
return errors.New("your version of Windows does not support WSL. Update to Windows 10 Build 19041 or later")
@@ -355,18 +338,17 @@ func attemptFeatureInstall(reExec, admin bool) error {
"If you prefer, you may abort now, and perform a manual installation using the \"wsl --install\" command."
if !reExec && MessageBox(message, "Podman Machine", false) != 1 {
- return errors.New("the WSL installation aborted")
+ return fmt.Errorf("the WSL installation aborted: %w", define.ErrInitRelaunchAttempt)
}
if !reExec && !admin {
return launchElevate("install the Windows WSL Features")
}
-
return installWsl()
}
func launchElevate(operation string) error {
- if err := truncateElevatedOutputFile(); err != nil {
+ if err := createOrTruncateElevatedOutputFile(); err != nil {
return err
}
err := relaunchElevatedWait()
@@ -374,15 +356,16 @@ func launchElevate(operation string) error {
if eerr, ok := err.(*ExitCodeError); ok {
if eerr.code == ErrorSuccessRebootRequired {
fmt.Println("Reboot is required to continue installation, please reboot at your convenience")
- return nil
+ return define.ErrInitRelaunchAttempt
}
}
fmt.Fprintf(os.Stderr, "Elevated process failed with error: %v\n\n", err)
dumpOutputFile()
fmt.Fprintf(os.Stderr, wslInstallError, operation)
+ return fmt.Errorf("%w: %w", err, define.ErrInitRelaunchAttempt)
}
- return err
+ return define.ErrInitRelaunchAttempt
}
func installWsl() error {
@@ -400,44 +383,10 @@ func installWsl() error {
"/featurename:VirtualMachinePlatform", "/all", "/norestart"); isMsiError(err) {
return fmt.Errorf("could not enable Virtual Machine Feature: %w", err)
}
- log.Close()
return reboot()
}
-func installWslKernel() error {
- log, err := getElevatedOutputFileWrite()
- if err != nil {
- return err
- }
- defer log.Close()
-
- message := "Installing WSL Kernel Update"
- fmt.Println(message)
- fmt.Fprintln(log, message)
-
- backoff := 500 * time.Millisecond
- for i := 0; i < 5; i++ {
- err = runCmdPassThroughTee(log, "wsl", "--update")
- if err == nil {
- break
- }
- // In case of unusual circumstances (e.g. race with installer actions)
- // retry a few times
- message = "An error occurred attempting the WSL Kernel update, retrying..."
- fmt.Println(message)
- fmt.Fprintln(log, message)
- time.Sleep(backoff)
- backoff *= 2
- }
-
- if err != nil {
- return fmt.Errorf("could not install WSL Kernel: %w", err)
- }
-
- return nil
-}
-
func getElevatedOutputFileName() (string, error) {
dir, err := homedir.GetDataHome()
if err != nil {
@@ -464,24 +413,14 @@ func getElevatedOutputFileWrite() (*os.File, error) {
return getElevatedOutputFile(os.O_WRONLY | os.O_CREATE | os.O_APPEND)
}
-func appendOutputIfError(write bool, err error) {
- if write && err == nil {
- return
- }
-
- if file, check := getElevatedOutputFileWrite(); check == nil {
- defer file.Close()
- fmt.Fprintf(file, "Error: %v\n", err)
- }
-}
-
-func truncateElevatedOutputFile() error {
+func createOrTruncateElevatedOutputFile() error {
name, err := getElevatedOutputFileName()
if err != nil {
return err
}
- return os.Truncate(name, 0)
+ _, err = os.Create(name)
+ return err
}
func getElevatedOutputFile(mode int) (*os.File, error) {
@@ -563,7 +502,7 @@ func runCmdPassThroughTee(out io.Writer, name string, arg ...string) error {
cmd.Stdin = os.Stdin
cmd.Stdout = io.MultiWriter(os.Stdout, out)
cmd.Stderr = io.MultiWriter(os.Stderr, out)
- if err := cmd.Run(); err != nil {
+ if err := cmd.Run(); isMsiError(err) {
return fmt.Errorf("command %s %v failed: %w", name, arg, err)
}
return nil
diff --git a/pkg/machine/wsl/stubber.go b/pkg/machine/wsl/stubber.go
index 9759512cbe8..6f6720c5dca 100644
--- a/pkg/machine/wsl/stubber.go
+++ b/pkg/machine/wsl/stubber.go
@@ -34,11 +34,6 @@ func (w WSLStubber) CreateVM(opts define.CreateVMOpts, mc *vmconfigs.MachineConf
go callbackFuncs.CleanOnSignal()
mc.WSLHypervisor = new(vmconfigs.WSLConfig)
- if cont, err := checkAndInstallWSL(opts.ReExec); !cont {
- appendOutputIfError(opts.ReExec, err)
- return err
- }
-
_ = setupWslProxyEnv()
if opts.UserModeNetworking {
@@ -51,6 +46,15 @@ func (w WSLStubber) CreateVM(opts define.CreateVMOpts, mc *vmconfigs.MachineConf
const prompt = "Importing operating system into WSL (this may take a few minutes on a new WSL install)..."
dist, err := provisionWSLDist(mc.Name, mc.ImagePath.GetPath(), prompt)
if err != nil {
+ if errors.Is(err, ErrWslNotSupported) {
+ // If error is Wsl/Service/RegisterDistro/CreateVm/HCS/ERROR_NOT_SUPPORTED
+ // or Wsl/Service/RegisterDistro/CreateVm/HCS/HCS_E_SERVICE_NOT_AVAILABLE
+ // it means WSL's VM creation failed, likely due to virtualization features not being enabled.
+ // Relaunching 'podman machine init' in elevated mode will attempt to reconfigure the WSL machine.
+ admin := HasAdminRights()
+
+ return attemptFeatureInstall(opts.ReExec, admin)
+ }
return err
}
diff --git a/pkg/machine/wsl/util_windows.go b/pkg/machine/wsl/util_windows.go
index a8efb3c7ab0..08d602d3cbc 100644
--- a/pkg/machine/wsl/util_windows.go
+++ b/pkg/machine/wsl/util_windows.go
@@ -16,6 +16,7 @@ import (
"unsafe"
"github.com/Microsoft/go-winio"
+ "github.com/containers/podman/v5/pkg/machine/define"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
"github.com/sirupsen/logrus"
@@ -216,6 +217,10 @@ func reboot() error {
}
}
+ if err := addRunOnceRegistryEntry(command); err != nil {
+ return err
+ }
+
message := "To continue the process of enabling WSL, the system needs to reboot. " +
"Alternatively, you can cancel and reboot manually\n\n" +
"After rebooting, please wait a minute or two for podman machine to relaunch and continue installing."
@@ -226,10 +231,6 @@ func reboot() error {
return nil
}
- if err := addRunOnceRegistryEntry(command); err != nil {
- return err
- }
-
if err := winio.RunWithPrivilege(rebootPrivilege, func() error {
if err := windows.ExitWindowsEx(rebootFlags, rebootReason); err != nil {
return fmt.Errorf("execute ExitWindowsEx to reboot system failed: %w", err)
@@ -239,7 +240,7 @@ func reboot() error {
return fmt.Errorf("cannot reboot system: %w", err)
}
- return nil
+ return define.ErrRebootInitiated
}
func addRunOnceRegistryEntry(command string) error {
diff --git a/pkg/machine/wsl/wutil/wutil.go b/pkg/machine/wsl/wutil/wutil.go
index b452d691d13..b65ab7bcb2b 100644
--- a/pkg/machine/wsl/wutil/wutil.go
+++ b/pkg/machine/wsl/wutil/wutil.go
@@ -78,14 +78,6 @@ func IsWSLInstalled() bool {
return status.installed && status.vmpFeatureEnabled
}
-func IsWSLFeatureEnabled() bool {
- if SilentExec("wsl", "--set-default-version", "2") != nil {
- return false
- }
- status := parseWSLStatus()
- return status.vmpFeatureEnabled
-}
-
func IsWSLStoreVersionInstalled() bool {
cmd := SilentExecCmd("wsl", "--version")
cmd.Stdout = nil
diff --git a/pkg/specgen/generate/container_create.go b/pkg/specgen/generate/container_create.go
index 06e3ce5074e..119533d1b0f 100644
--- a/pkg/specgen/generate/container_create.go
+++ b/pkg/specgen/generate/container_create.go
@@ -515,6 +515,7 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l
Source: v.Source,
Digest: v.Digest,
Title: v.Title,
+ Name: v.Name,
})
}
options = append(options, libpod.WithArtifactVolumes(vols))
diff --git a/pkg/specgen/utils_linux.go b/pkg/specgen/utils_linux.go
index 075f81c4f9b..0a5f794953c 100644
--- a/pkg/specgen/utils_linux.go
+++ b/pkg/specgen/utils_linux.go
@@ -9,94 +9,105 @@ import (
"golang.org/x/sys/unix"
)
+// statBlkDev returns path's major and minor, or an error, if path is not a block device.
+func statBlkDev(path string) (int64, int64, error) {
+ var stat unix.Stat_t
+
+ if err := unix.Stat(path, &stat); err != nil {
+ return 0, 0, fmt.Errorf("could not parse device %s: %w", path, err)
+ }
+ if stat.Mode&unix.S_IFMT != unix.S_IFBLK {
+ return 0, 0, fmt.Errorf("%s: not a block device", path)
+ }
+ rdev := uint64(stat.Rdev) //nolint:unconvert // Some architectures have different type.
+ return int64(unix.Major(rdev)), int64(unix.Minor(rdev)), nil
+}
+
+// fillThrottleDev fills in dev.Major and dev.Minor fields based on path to a block device.
+func fillThrottleDev(path string, dev *spec.LinuxThrottleDevice) error {
+ major, minor, err := statBlkDev(path)
+ if err != nil {
+ return err
+ }
+
+ dev.Major, dev.Minor = major, minor
+
+ return nil
+}
+
// FinishThrottleDevices takes the temporary representation of the throttle
-// devices in the specgen and looks up the major and major minors. it then
-// sets the throttle devices proper in the specgen
+// devices in the specgen, fills in major and minor numbers, and amends the
+// specgen with the specified devices. It returns an error if any device
+// specified doesn't exist or is not a block device.
func FinishThrottleDevices(s *SpecGenerator) error {
+ if len(s.ThrottleReadBpsDevice)+len(s.ThrottleWriteBpsDevice)+len(s.ThrottleReadIOPSDevice)+len(s.ThrottleWriteIOPSDevice) == 0 {
+ return nil
+ }
+
if s.ResourceLimits == nil {
s.ResourceLimits = &spec.LinuxResources{}
}
- if bps := s.ThrottleReadBpsDevice; len(bps) > 0 {
- if s.ResourceLimits.BlockIO == nil {
- s.ResourceLimits.BlockIO = &spec.LinuxBlockIO{}
- }
- for k, v := range bps {
- statT := unix.Stat_t{}
- if err := unix.Stat(k, &statT); err != nil {
- return fmt.Errorf("could not parse throttle device at %s: %w", k, err)
- }
- v.Major = (int64(unix.Major(uint64(statT.Rdev)))) //nolint: unconvert
- v.Minor = (int64(unix.Minor(uint64(statT.Rdev)))) //nolint: unconvert
- if s.ResourceLimits.BlockIO == nil {
- s.ResourceLimits.BlockIO = new(spec.LinuxBlockIO)
- }
- s.ResourceLimits.BlockIO.ThrottleReadBpsDevice = append(s.ResourceLimits.BlockIO.ThrottleReadBpsDevice, v)
- }
+ if s.ResourceLimits.BlockIO == nil {
+ s.ResourceLimits.BlockIO = &spec.LinuxBlockIO{}
}
- if bps := s.ThrottleWriteBpsDevice; len(bps) > 0 {
- if s.ResourceLimits.BlockIO == nil {
- s.ResourceLimits.BlockIO = &spec.LinuxBlockIO{}
- }
- for k, v := range bps {
- statT := unix.Stat_t{}
- if err := unix.Stat(k, &statT); err != nil {
- return fmt.Errorf("could not parse throttle device at %s: %w", k, err)
- }
- v.Major = (int64(unix.Major(uint64(statT.Rdev)))) //nolint: unconvert
- v.Minor = (int64(unix.Minor(uint64(statT.Rdev)))) //nolint: unconvert
- s.ResourceLimits.BlockIO.ThrottleWriteBpsDevice = append(s.ResourceLimits.BlockIO.ThrottleWriteBpsDevice, v)
+
+ for k, v := range s.ThrottleReadBpsDevice {
+ if err := fillThrottleDev(k, &v); err != nil {
+ return err
}
+ s.ResourceLimits.BlockIO.ThrottleReadBpsDevice = append(s.ResourceLimits.BlockIO.ThrottleReadBpsDevice, v)
}
- if iops := s.ThrottleReadIOPSDevice; len(iops) > 0 {
- if s.ResourceLimits.BlockIO == nil {
- s.ResourceLimits.BlockIO = &spec.LinuxBlockIO{}
- }
- for k, v := range iops {
- statT := unix.Stat_t{}
- if err := unix.Stat(k, &statT); err != nil {
- return fmt.Errorf("could not parse throttle device at %s: %w", k, err)
- }
- v.Major = (int64(unix.Major(uint64(statT.Rdev)))) //nolint: unconvert
- v.Minor = (int64(unix.Minor(uint64(statT.Rdev)))) //nolint: unconvert
- s.ResourceLimits.BlockIO.ThrottleReadIOPSDevice = append(s.ResourceLimits.BlockIO.ThrottleReadIOPSDevice, v)
+
+ for k, v := range s.ThrottleWriteBpsDevice {
+ if err := fillThrottleDev(k, &v); err != nil {
+ return err
}
+ s.ResourceLimits.BlockIO.ThrottleWriteBpsDevice = append(s.ResourceLimits.BlockIO.ThrottleWriteBpsDevice, v)
}
- if iops := s.ThrottleWriteIOPSDevice; len(iops) > 0 {
- if s.ResourceLimits.BlockIO == nil {
- s.ResourceLimits.BlockIO = &spec.LinuxBlockIO{}
+
+ for k, v := range s.ThrottleReadIOPSDevice {
+ if err := fillThrottleDev(k, &v); err != nil {
+ return err
}
- for k, v := range iops {
- statT := unix.Stat_t{}
- if err := unix.Stat(k, &statT); err != nil {
- return fmt.Errorf("could not parse throttle device at %s: %w", k, err)
- }
- v.Major = (int64(unix.Major(uint64(statT.Rdev)))) //nolint: unconvert
- v.Minor = (int64(unix.Minor(uint64(statT.Rdev)))) //nolint: unconvert
- s.ResourceLimits.BlockIO.ThrottleWriteIOPSDevice = append(s.ResourceLimits.BlockIO.ThrottleWriteIOPSDevice, v)
+ s.ResourceLimits.BlockIO.ThrottleReadIOPSDevice = append(s.ResourceLimits.BlockIO.ThrottleReadIOPSDevice, v)
+ }
+
+ for k, v := range s.ThrottleWriteIOPSDevice {
+ if err := fillThrottleDev(k, &v); err != nil {
+ return err
}
+ s.ResourceLimits.BlockIO.ThrottleWriteIOPSDevice = append(s.ResourceLimits.BlockIO.ThrottleWriteIOPSDevice, v)
}
+
return nil
}
func WeightDevices(specgen *SpecGenerator) error {
- devs := []spec.LinuxWeightDevice{}
+ if len(specgen.WeightDevice) == 0 {
+ return nil
+ }
+
if specgen.ResourceLimits == nil {
specgen.ResourceLimits = &spec.LinuxResources{}
}
+ if specgen.ResourceLimits.BlockIO == nil {
+ specgen.ResourceLimits.BlockIO = &spec.LinuxBlockIO{}
+ }
+
for k, v := range specgen.WeightDevice {
- statT := unix.Stat_t{}
- if err := unix.Stat(k, &statT); err != nil {
- return fmt.Errorf("failed to inspect '%s' in --blkio-weight-device: %w", k, err)
- }
- dev := new(spec.LinuxWeightDevice)
- dev.Major = (int64(unix.Major(uint64(statT.Rdev)))) //nolint: unconvert
- dev.Minor = (int64(unix.Minor(uint64(statT.Rdev)))) //nolint: unconvert
- dev.Weight = v.Weight
- devs = append(devs, *dev)
- if specgen.ResourceLimits.BlockIO == nil {
- specgen.ResourceLimits.BlockIO = &spec.LinuxBlockIO{}
+ major, minor, err := statBlkDev(k)
+ if err != nil {
+ return fmt.Errorf("bad --blkio-weight-device: %w", err)
}
- specgen.ResourceLimits.BlockIO.WeightDevice = devs
+ specgen.ResourceLimits.BlockIO.WeightDevice = append(specgen.ResourceLimits.BlockIO.WeightDevice,
+ spec.LinuxWeightDevice{
+ LinuxBlockIODevice: spec.LinuxBlockIODevice{
+ Major: major,
+ Minor: minor,
+ },
+ Weight: v.Weight,
+ })
}
+
return nil
}
diff --git a/pkg/specgen/volumes.go b/pkg/specgen/volumes.go
index 786f271c833..3243333da51 100644
--- a/pkg/specgen/volumes.go
+++ b/pkg/specgen/volumes.go
@@ -78,6 +78,11 @@ type ArtifactVolume struct {
// the title annotation exist.
// Optional. Conflicts with Title.
Digest string `json:"digest,omitempty"`
+ // Name is the name that should be used for the path inside the container. When a single blob
+ // is mounted the name is used as is. If multiple blobs are mounted then mount them as
+ // "-x" where x is a 0 indexed integer based on the layer order.
+ // Optional.
+ Name string `json:"name,omitempty"`
}
// GenVolumeMounts parses user input into mounts, volumes and overlay volumes
diff --git a/pkg/specgenutil/volumes.go b/pkg/specgenutil/volumes.go
index 02a7627a361..69366a1e902 100644
--- a/pkg/specgenutil/volumes.go
+++ b/pkg/specgenutil/volumes.go
@@ -747,6 +747,11 @@ func getArtifactVolume(args []string) (*specgen.ArtifactVolume, error) {
return nil, fmt.Errorf("%v: %w", name, errOptionArg)
}
newVolume.Digest = value
+ case "name":
+ if !hasValue {
+ return nil, fmt.Errorf("%v: %w", name, errOptionArg)
+ }
+ newVolume.Name = value
default:
return nil, fmt.Errorf("%s: %w", name, util.ErrBadMntOption)
}
diff --git a/pkg/systemd/quadlet/quadlet.go b/pkg/systemd/quadlet/quadlet.go
index 4fd27deab9a..e11a7fde44e 100644
--- a/pkg/systemd/quadlet/quadlet.go
+++ b/pkg/systemd/quadlet/quadlet.go
@@ -1115,6 +1115,9 @@ func ConvertVolume(volume *parser.UnitFile, name string, unitsInfoMap map[string
if ok && len(devType) != 0 {
if devValid {
podman.add("--opt", fmt.Sprintf("type=%s", devType))
+ if devType == "bind" {
+ service.Add(UnitGroup, "RequiresMountsFor", dev)
+ }
} else {
return nil, warnings, fmt.Errorf("key Type can't be used without Device")
}
diff --git a/rpm/podman.spec b/rpm/podman.spec
index a717907c212..fd89e342f44 100644
--- a/rpm/podman.spec
+++ b/rpm/podman.spec
@@ -96,6 +96,7 @@ BuildRequires: shadow-utils-subid-devel
BuildRequires: pkgconfig
BuildRequires: make
BuildRequires: man-db
+BuildRequires: sqlite-devel
BuildRequires: systemd
BuildRequires: systemd-devel
Requires: catatonit
@@ -252,7 +253,7 @@ LDFLAGS="$LDFLAGS -X %{ld_libpod}/define.gitCommit=$GIT_COMMIT"
# build rootlessport first
%gobuild -o bin/rootlessport ./cmd/rootlessport
-export BASEBUILDTAGS="seccomp $(hack/systemd_tag.sh) $(hack/libsubid_tag.sh)"
+export BASEBUILDTAGS="seccomp $(hack/systemd_tag.sh) $(hack/libsubid_tag.sh) libsqlite3"
# libtrust_openssl buildtag switches to using the FIPS-compatible func
# `ecdsa.HashSign`.
@@ -263,7 +264,7 @@ export BASEBUILDTAGS="$BASEBUILDTAGS libtrust_openssl"
%endif
# build %%{name}
-export BUILDTAGS="$BASEBUILDTAGS $(hack/btrfs_installed_tag.sh) $(hack/libdm_tag.sh)"
+export BUILDTAGS="$BASEBUILDTAGS $(hack/btrfs_installed_tag.sh)"
%gobuild -o bin/%{name} ./cmd/%{name}
# build %%{name}-remote
diff --git a/test/apiv2/python/requirements.txt b/test/apiv2/python/requirements.txt
index 23a1e2e4c9a..56bb9e450be 100644
--- a/test/apiv2/python/requirements.txt
+++ b/test/apiv2/python/requirements.txt
@@ -4,5 +4,5 @@ setuptools~=80.9.0
python-dateutil~=2.9.0
PyYAML~=6.0.0
openapi-schema-validator~=0.6.2
-pytest==8.4.0
+pytest==8.4.1
docker~=7.1.0
diff --git a/test/buildah-bud/apply-podman-deltas b/test/buildah-bud/apply-podman-deltas
index da4517e2d69..aa3a1086484 100755
--- a/test/buildah-bud/apply-podman-deltas
+++ b/test/buildah-bud/apply-podman-deltas
@@ -240,7 +240,8 @@ skip_if_remote "--output option not implemented in podman-remote" \
"build with custom build output and output rootfs to tar with no additional step" \
"build with custom build output for single-stage-cached and output rootfs to directory" \
"build with custom build output for multi-stage-cached and output rootfs to directory" \
- "build with custom build output for multi-stage and output rootfs to directory"
+ "build with custom build output for multi-stage and output rootfs to directory" \
+ "build-with-two-outputs"
# https://github.com/containers/podman/issues/14544
skip_if_remote "logfile not implemented on remote" "bud-logfile-with-split-logfile-by-platform"
diff --git a/test/buildah-bud/buildah-tests.diff b/test/buildah-bud/buildah-tests.diff
index 5758aa58c19..a73790dbe09 100644
--- a/test/buildah-bud/buildah-tests.diff
+++ b/test/buildah-bud/buildah-tests.diff
@@ -1,4 +1,4 @@
-From 09a0ad17630b688e0f06e3de83b830b70f6c50fc Mon Sep 17 00:00:00 2001
+From 814de89af4706b9767b457fd74b9f66f592e7afa Mon Sep 17 00:00:00 2001
From: Ed Santiago
Date: Thu, 6 Oct 2022 17:32:59 -0600
Subject: [PATCH] tweaks for running buildah tests under podman
@@ -10,10 +10,10 @@ Signed-off-by: Paul Holzinger
1 file changed, 109 insertions(+), 4 deletions(-)
diff --git a/tests/helpers.bash b/tests/helpers.bash
-index ed5de994e..3fc39e17b 100644
+index 3d1211492..dbf175de3 100644
--- a/tests/helpers.bash
+++ b/tests/helpers.bash
-@@ -80,6 +80,42 @@ EOF
+@@ -81,6 +81,42 @@ EOF
BUILDAH_REGISTRY_OPTS="${regconfopt} ${regconfdir} --short-name-alias-conf ${TEST_SCRATCH_DIR}/cache/shortnames.conf"
COPY_REGISTRY_OPTS="${BUILDAH_REGISTRY_OPTS}"
PODMAN_REGISTRY_OPTS="${regconfopt}"
@@ -56,7 +56,7 @@ index ed5de994e..3fc39e17b 100644
}
function starthttpd() { # directory [working-directory-or-"" [certfile, keyfile]]
-@@ -144,6 +180,22 @@ function teardown_tests() {
+@@ -145,6 +181,22 @@ function teardown_tests() {
stop_git_daemon
stop_registry
@@ -79,7 +79,7 @@ index ed5de994e..3fc39e17b 100644
# Workaround for #1991 - buildah + overlayfs leaks mount points.
# Many tests leave behind /var/tmp/.../root/overlay and sub-mounts;
# let's find those and clean them up, otherwise 'rm -rf' fails.
-@@ -265,7 +317,12 @@ function copy() {
+@@ -266,7 +318,12 @@ function copy() {
}
function podman() {
@@ -93,7 +93,7 @@ index ed5de994e..3fc39e17b 100644
}
# There are various scenarios where we would like to execute `tests` as rootless user, however certain commands like `buildah mount`
-@@ -329,8 +386,36 @@ function run_buildah() {
+@@ -373,8 +430,36 @@ function run_buildah() {
--retry) retry=3; shift;; # retry network flakes
esac
@@ -131,7 +131,7 @@ index ed5de994e..3fc39e17b 100644
# If session is rootless and `buildah mount` is invoked, perform unshare,
# since normal user cannot mount a filesystem unless they're in a user namespace along with its own mount namespace.
-@@ -344,8 +429,8 @@ function run_buildah() {
+@@ -388,8 +473,8 @@ function run_buildah() {
retry=$(( retry - 1 ))
# stdout is only emitted upon error; this echo is to help a debugger
@@ -142,7 +142,7 @@ index ed5de994e..3fc39e17b 100644
# without "quotes", multiple lines are glommed together into one
if [ -n "$output" ]; then
echo "$output"
-@@ -706,6 +791,26 @@ function skip_if_no_unshare() {
+@@ -753,6 +838,26 @@ function skip_if_no_unshare() {
fi
}
@@ -166,9 +166,9 @@ index ed5de994e..3fc39e17b 100644
+ fi
+}
+
- function start_git_daemon() {
- daemondir=${TEST_SCRATCH_DIR}/git-daemon
- mkdir -p ${daemondir}/repo
+ ######################
+ # start_git_daemon #
+ ######################
--
-2.48.1
+2.49.0
diff --git a/test/buildah-bud/run-buildah-bud-tests b/test/buildah-bud/run-buildah-bud-tests
index 0ed3a2e3be5..74acbb169df 100755
--- a/test/buildah-bud/run-buildah-bud-tests
+++ b/test/buildah-bud/run-buildah-bud-tests
@@ -177,13 +177,15 @@ if [[ -n $do_checkout ]]; then
# the set of patches
(set -x;git tag $BASE_TAG)
- # Build buildah and the copy and inet helpers
+ # Build buildah and the copy, inet, and dumpspec test helpers
failhint="error building buildah. This should never happen."
(set -x;make bin/buildah)
failhint="error building buildah's copy helper. This should never happen."
(set -x;make bin/copy)
failhint="error building buildah's inet helper. This should never happen."
(set -x;make bin/inet)
+ failhint="error building buildah's dumpspec helper. This should never happen."
+ (set -x;make bin/dumpspec)
# The upcoming patch may fail. Before we try it, create a helper script
# for a developer to push a new set of diffs to podman-land.
@@ -262,5 +264,6 @@ review the test failure and double-check your changes.
BUILDAH_BINARY=$(pwd)/bin/buildah \
COPY_BINARY=$(pwd)/bin/copy \
INET_BINARY=$(pwd)/bin/inet \
+ DUMPSPEC_BINARY=$(pwd)/bin/dumpspec \
bats -j $(nproc) "${bats_filter[@]}" tests/bud.bats)
fi
diff --git a/test/e2e/artifact_mount_test.go b/test/e2e/artifact_mount_test.go
index d158dc13312..4b71df43d63 100644
--- a/test/e2e/artifact_mount_test.go
+++ b/test/e2e/artifact_mount_test.go
@@ -29,7 +29,7 @@ var _ = Describe("Podman artifact mount", func() {
{
name: "single artifact mount",
mountOpts: "dst=/test",
- containerFile: "/test/testfile",
+ containerFile: "/test",
},
{
name: "single artifact mount on existing file",
@@ -44,7 +44,12 @@ var _ = Describe("Podman artifact mount", func() {
{
name: "single artifact mount with digest",
mountOpts: "dst=/data,digest=sha256:e9510923578af3632946ecf5ae479c1b5f08b47464e707b5cbab9819272a9752",
- containerFile: "/data/sha256-e9510923578af3632946ecf5ae479c1b5f08b47464e707b5cbab9819272a9752",
+ containerFile: "/data",
+ },
+ {
+ name: "single artifact mount with name",
+ mountOpts: "dst=/tmp,name=abcd",
+ containerFile: "/tmp/abcd",
},
}
@@ -104,25 +109,49 @@ var _ = Describe("Podman artifact mount", func() {
},
},
{
- name: "multi blob filter by title",
+ name: "multi blob filter by title on non existing file",
mountOpts: "src=" + ARTIFACT_MULTI + ",dst=/test,title=test2",
containerFiles: []expectedFiles{
{
- file: "/test/test2",
+ file: "/test",
+ content: artifactContent2,
+ },
+ },
+ },
+ {
+ name: "multi blob filter by title on existing file",
+ mountOpts: "src=" + ARTIFACT_MULTI + ",dst=/tmp,title=test2",
+ containerFiles: []expectedFiles{
+ {
+ file: "/tmp/test2",
content: artifactContent2,
},
},
},
{
name: "multi blob filter by digest",
- mountOpts: "src=" + ARTIFACT_MULTI + ",dst=/test,digest=sha256:8257bba28b9d19ac353c4b713b470860278857767935ef7e139afd596cb1bb2d",
+ mountOpts: "src=" + ARTIFACT_MULTI + ",dst=/tmp,digest=sha256:8257bba28b9d19ac353c4b713b470860278857767935ef7e139afd596cb1bb2d",
containerFiles: []expectedFiles{
{
- file: "/test/sha256-8257bba28b9d19ac353c4b713b470860278857767935ef7e139afd596cb1bb2d",
+ file: "/tmp/sha256-8257bba28b9d19ac353c4b713b470860278857767935ef7e139afd596cb1bb2d",
content: artifactContent1,
},
},
},
+ {
+ name: "multi blob with name",
+ mountOpts: "src=" + ARTIFACT_MULTI + ",dst=/test,name=myname",
+ containerFiles: []expectedFiles{
+ {
+ file: "/test/myname-0",
+ content: artifactContent1,
+ },
+ {
+ file: "/test/myname-1",
+ content: artifactContent2,
+ },
+ },
+ },
}
for _, tt := range tests {
By(tt.name)
@@ -152,12 +181,12 @@ var _ = Describe("Podman artifact mount", func() {
podmanTest.PodmanExitCleanly("artifact", "add", artifactName, artifactFile)
// FIXME: we need https://github.com/containers/container-selinux/pull/360 to fix the selinux access problem, until then disable it.
- podmanTest.PodmanExitCleanly("run", "--security-opt=label=disable", "--name", ctrName, "-d", "--mount", "type=artifact,src="+artifactName+",dst=/test", ALPINE, "sleep", "100")
+ podmanTest.PodmanExitCleanly("run", "--security-opt=label=disable", "--name", ctrName, "-d", "--mount", "type=artifact,src="+artifactName+",dst=/tmp", ALPINE, "sleep", "100")
podmanTest.PodmanExitCleanly("artifact", "rm", artifactName)
// file must sill be readable after artifact removal
- session := podmanTest.PodmanExitCleanly("exec", ctrName, "cat", "/test/"+artifactFileName)
+ session := podmanTest.PodmanExitCleanly("exec", ctrName, "cat", "/tmp/"+artifactFileName)
Expect(session.OutputToString()).To(Equal("hello world"))
// restart will fail if artifact does not exist
@@ -174,7 +203,7 @@ var _ = Describe("Podman artifact mount", func() {
podmanTest.PodmanExitCleanly("artifact", "add", artifactName, artifactFile, artifactFile2)
podmanTest.PodmanExitCleanly("start", ctrName)
- session = podmanTest.PodmanExitCleanly("exec", ctrName, "cat", "/test/"+artifactFileName, "/test/"+artifactFile2Name)
+ session = podmanTest.PodmanExitCleanly("exec", ctrName, "cat", "/tmp/"+artifactFileName, "/tmp/"+artifactFile2Name)
Expect(session.OutputToString()).To(Equal("hello world second file"))
})
diff --git a/test/e2e/buildx_inspect_test.go b/test/e2e/buildx_inspect_test.go
new file mode 100644
index 00000000000..2ce79861b48
--- /dev/null
+++ b/test/e2e/buildx_inspect_test.go
@@ -0,0 +1,38 @@
+//go:build linux || freebsd
+
+package integration
+
+import (
+ "encoding/json"
+ "regexp"
+ "strings"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Podman buildx inspect", func() {
+ It("podman buildx inspect", func() {
+ session := podmanTest.PodmanExitCleanly("buildx", "inspect")
+ out := session.OutputToString()
+
+ session_bootstrap := podmanTest.PodmanExitCleanly("buildx", "inspect", "--bootstrap")
+ out_bootstrap := session_bootstrap.OutputToString()
+ Expect(out_bootstrap).To(Equal(out), "Output of 'podman buildx inspect' and 'podman buildx inspect --bootstrap' should be the same")
+
+ emuInfo := podmanTest.PodmanExitCleanly("info", "--format", "{{json .Host.EmulatedArchitectures}}")
+ var emuArchs []string
+ Expect(json.Unmarshal([]byte(emuInfo.OutputToString()), &emuArchs)).To(Succeed())
+
+ nativeInfo := podmanTest.PodmanExitCleanly("info", "--format", "{{.Host.OS}}/{{.Host.Arch}}")
+ nativePlat := strings.TrimSpace(nativeInfo.OutputToString())
+ Expect(nativePlat).ToNot(BeEmpty())
+
+ expected := append(emuArchs, nativePlat)
+
+ for _, p := range expected {
+ re := regexp.MustCompile(`(?s)Platforms:.*\b` + regexp.QuoteMeta(p) + `\b`)
+ Expect(out).To(MatchRegexp(re.String()), "missing %q in:\n%s", p, out)
+ }
+ })
+})
diff --git a/test/e2e/quadlet/device-bind.volume b/test/e2e/quadlet/device-bind.volume
new file mode 100644
index 00000000000..9b68ee9cd6a
--- /dev/null
+++ b/test/e2e/quadlet/device-bind.volume
@@ -0,0 +1,9 @@
+## assert-key-contains Service ExecStart " --opt type=bind "
+## assert-key-contains Service ExecStart " --opt device=/var/lib/data "
+## assert-key-contains Service ExecStart " --opt nocopy "
+## assert-key-contains Unit RequiresMountsFor "/var/lib/data"
+
+[Volume]
+Device=/var/lib/data
+Type=bind
+Copy=no
diff --git a/test/e2e/quadlet_test.go b/test/e2e/quadlet_test.go
index daf63e9d041..982c9cd3ae1 100644
--- a/test/e2e/quadlet_test.go
+++ b/test/e2e/quadlet_test.go
@@ -803,7 +803,6 @@ BOGUS=foo
dirName := "test_subdir"
err = CopyDirectory(filepath.Join("quadlet", dirName), quadletDir)
-
if err != nil {
GinkgoWriter.Println("error:", err)
}
@@ -975,6 +974,7 @@ BOGUS=foo
Entry("image.volume", "image.volume"),
Entry("Volume - global args", "globalargs.volume"),
Entry("Volume - Containers Conf Modules", "containersconfmodule.volume"),
+ Entry("Volume - Type=bind", "device-bind.volume"),
Entry("Absolute Path", "absolute.path.kube"),
Entry("Basic kube", "basic.kube"),
@@ -1276,5 +1276,4 @@ BOGUS=foo
},
),
)
-
})
diff --git a/test/system/280-update.bats b/test/system/280-update.bats
index 19c0e3aa478..0c87332900c 100644
--- a/test/system/280-update.bats
+++ b/test/system/280-update.bats
@@ -326,4 +326,27 @@ function nrand() {
run_podman rm -t 0 -f $ctrname
}
+
+# bats test_tags=ci:parallel
+@test "podman update - non-block device rejected by --*device* options" {
+ local dev=/dev/zero # Not a block device.
+ local block_opts=(
+ "--blkio-weight-device=$dev:123"
+ "--device-read-bps=$dev:10mb"
+ "--device-write-bps=$dev:10mb"
+ "--device-read-iops=$dev:1000"
+ "--device-write-iops=$dev:1000"
+ )
+ run_podman run -d "$IMAGE" /home/podman/pause
+ cid="$output"
+
+ defer-assertion-failures
+ for opt in "${block_opts[@]}"; do
+ run_podman 125 update "$cid" "$opt"
+ assert "$output" =~ "$dev: not a block device"
+ done
+ immediate-assertion-failures
+
+ run_podman rm -t 0 -f "$cid"
+}
# vim: filetype=sh
diff --git a/test/tools/go.mod b/test/tools/go.mod
index 8b1ca43120a..fcf2e8c4a86 100644
--- a/test/tools/go.mod
+++ b/test/tools/go.mod
@@ -7,35 +7,36 @@ go 1.22.0
require (
github.com/cpuguy83/go-md2man/v2 v2.0.7
- github.com/go-swagger/go-swagger v0.30.5
+ github.com/go-swagger/go-swagger v0.32.3
github.com/vbatts/git-validation v1.2.2
)
require (
github.com/Masterminds/goutils v1.1.1 // indirect
- github.com/Masterminds/semver/v3 v3.2.0 // indirect
+ github.com/Masterminds/semver/v3 v3.2.1 // indirect
github.com/Masterminds/sprig/v3 v3.2.3 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/fatih/color v1.15.0 // indirect
- github.com/felixge/httpsnoop v1.0.3 // indirect
- github.com/fsnotify/fsnotify v1.6.0 // indirect
- github.com/go-openapi/analysis v0.21.4 // indirect
- github.com/go-openapi/errors v0.20.4 // indirect
- github.com/go-openapi/inflect v0.19.0 // indirect
- github.com/go-openapi/jsonpointer v0.19.6 // indirect
- github.com/go-openapi/jsonreference v0.20.2 // indirect
- github.com/go-openapi/loads v0.21.2 // indirect
- github.com/go-openapi/runtime v0.26.0 // indirect
- github.com/go-openapi/spec v0.20.9 // indirect
- github.com/go-openapi/strfmt v0.21.7 // indirect
- github.com/go-openapi/swag v0.22.4 // indirect
- github.com/go-openapi/validate v0.22.1 // indirect
- github.com/google/uuid v1.3.0 // indirect
- github.com/gorilla/handlers v1.5.1 // indirect
+ github.com/felixge/httpsnoop v1.0.4 // indirect
+ github.com/fsnotify/fsnotify v1.7.0 // indirect
+ github.com/go-openapi/analysis v0.23.0 // indirect
+ github.com/go-openapi/errors v0.22.0 // indirect
+ github.com/go-openapi/inflect v0.21.0 // indirect
+ github.com/go-openapi/jsonpointer v0.21.0 // indirect
+ github.com/go-openapi/jsonreference v0.21.0 // indirect
+ github.com/go-openapi/loads v0.22.0 // indirect
+ github.com/go-openapi/runtime v0.28.0 // indirect
+ github.com/go-openapi/spec v0.21.0 // indirect
+ github.com/go-openapi/strfmt v0.23.0 // indirect
+ github.com/go-openapi/swag v0.23.0 // indirect
+ github.com/go-openapi/validate v0.24.0 // indirect
+ github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
+ github.com/google/uuid v1.6.0 // indirect
+ github.com/gorilla/handlers v1.5.2 // indirect
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
- github.com/huandu/xstrings v1.3.3 // indirect
- github.com/imdario/mergo v0.3.12 // indirect
+ github.com/huandu/xstrings v1.4.0 // indirect
+ github.com/imdario/mergo v0.3.16 // indirect
github.com/jessevdk/go-flags v1.5.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/kr/pretty v0.3.1 // indirect
@@ -49,21 +50,24 @@ require (
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/oklog/ulid v1.3.1 // indirect
- github.com/pelletier/go-toml/v2 v2.0.8 // indirect
- github.com/pkg/errors v0.9.1 // indirect
- github.com/rogpeppe/go-internal v1.9.0 // indirect
+ github.com/pelletier/go-toml/v2 v2.1.1 // indirect
+ github.com/rogpeppe/go-internal v1.12.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
- github.com/shopspring/decimal v1.2.0 // indirect
+ github.com/sagikazarmark/locafero v0.4.0 // indirect
+ github.com/sagikazarmark/slog-shim v0.1.0 // indirect
+ github.com/shopspring/decimal v1.3.1 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
- github.com/spf13/afero v1.9.5 // indirect
- github.com/spf13/cast v1.5.1 // indirect
- github.com/spf13/jwalterweatherman v1.1.0 // indirect
+ github.com/sourcegraph/conc v0.3.0 // indirect
+ github.com/spf13/afero v1.11.0 // indirect
+ github.com/spf13/cast v1.6.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
- github.com/spf13/viper v1.16.0 // indirect
- github.com/subosito/gotenv v1.4.2 // indirect
+ github.com/spf13/viper v1.18.2 // indirect
+ github.com/subosito/gotenv v1.6.0 // indirect
github.com/toqueteos/webbrowser v1.2.0 // indirect
- go.mongodb.org/mongo-driver v1.11.3 // indirect
+ go.mongodb.org/mongo-driver v1.14.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
+ golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect
golang.org/x/mod v0.23.0 // indirect
golang.org/x/sync v0.11.0 // indirect
golang.org/x/sys v0.30.0 // indirect
diff --git a/test/tools/go.sum b/test/tools/go.sum
index 3e93454a83d..b8d68a3ab07 100644
--- a/test/tools/go.sum
+++ b/test/tools/go.sum
@@ -1,264 +1,84 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
-cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
-cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
-cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
-cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
-cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
-cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
-github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
+github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
+github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=
github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
-github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
-github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
-github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
-github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
-github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=
-github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc=
-github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo=
-github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
-github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
-github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
-github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M=
-github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk=
-github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4=
-github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
-github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
-github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
-github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
-github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
-github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
-github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g=
-github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro=
-github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw=
-github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc=
-github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ=
-github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
-github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
-github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8=
-github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
-github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
-github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
-github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
-github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k=
-github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
-github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
-github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-swagger/go-swagger v0.30.5 h1:SQ2+xSonWjjoEMOV5tcOnZJVlfyUfCBhGQGArS1b9+U=
-github.com/go-swagger/go-swagger v0.30.5/go.mod h1:cWUhSyCNqV7J1wkkxfr5QmbcnCewetCdvEXqgPvbc/Q=
-github.com/go-swagger/scan-repo-boundary v0.0.0-20180623220736-973b3573c013 h1:l9rI6sNaZgNC0LnF3MiE+qTmyBA/tZAg1rtyrGbUMK0=
-github.com/go-swagger/scan-repo-boundary v0.0.0-20180623220736-973b3573c013/go.mod h1:b65mBPzqzZWxOZGxSWrqs4GInLIn+u99Q9q7p+GKni0=
-github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
-github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
-github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
-github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
-github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
-github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
-github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
-github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
-github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
-github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
-github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
-github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
-github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
-github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
-github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
-github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
-github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
-github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
-github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
-github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
-github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
-github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
-github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
-github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU=
+github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo=
+github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w=
+github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE=
+github.com/go-openapi/inflect v0.21.0 h1:FoBjBTQEcbg2cJUWX6uwL9OyIW8eqc9k4KhN4lfbeYk=
+github.com/go-openapi/inflect v0.21.0/go.mod h1:INezMuUu7SJQc2AyR3WO0DqqYUJSj8Kb4hBd7WtjlAw=
+github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
+github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
+github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
+github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
+github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco=
+github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs=
+github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ=
+github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc=
+github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY=
+github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
+github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=
+github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
+github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
+github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
+github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
+github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
+github.com/go-swagger/go-swagger v0.32.3 h1:bhAfZ4WaFXyPuw2OrXg34rOcUBR++fpVdonRRYzBK1c=
+github.com/go-swagger/go-swagger v0.32.3/go.mod h1:lAwO1nKff3qNRJYVQeTCl1am5pcNiiA2VyDf8TqzS24=
+github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
+github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
-github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=
-github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE=
+github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4=
github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU=
+github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
-github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
+github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc=
github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
-github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
-github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg=
github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
-github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
@@ -268,259 +88,92 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
-github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
-github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
-github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
-github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
+github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI=
+github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
+github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
+github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
+github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
+github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
-github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
+github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM=
-github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
+github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
+github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
+github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
+github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
-github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
-github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
+github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc=
-github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg=
+github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
+github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
-github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
-github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
-github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
+github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ=
github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM=
github.com/vbatts/git-validation v1.2.2 h1:AHTS8Jara7Pcu0ub7RusMBAvyLtjsJtF/xPU5Pm1BPE=
github.com/vbatts/git-validation v1.2.2/go.mod h1:Fj+04EdPcZ0rMOR+dqvppMVVgyNcOFRT5iFWuIc593A=
-github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
-github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
-github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
-github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
-github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
-github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
-go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
-go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
-go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y=
-go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
+go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ=
+golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -529,193 +182,27 @@ golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
-google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
-google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
-google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/test/tools/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/test/tools/vendor/github.com/Masterminds/semver/v3/.golangci.yml
index c87d1c4b90e..fbc6332592f 100644
--- a/test/tools/vendor/github.com/Masterminds/semver/v3/.golangci.yml
+++ b/test/tools/vendor/github.com/Masterminds/semver/v3/.golangci.yml
@@ -5,12 +5,9 @@ linters:
disable-all: true
enable:
- misspell
- - structcheck
- govet
- staticcheck
- - deadcode
- errcheck
- - varcheck
- unparam
- ineffassign
- nakedret
diff --git a/test/tools/vendor/github.com/Masterminds/semver/v3/Makefile b/test/tools/vendor/github.com/Masterminds/semver/v3/Makefile
index eac19178fbd..0e7b5c7138e 100644
--- a/test/tools/vendor/github.com/Masterminds/semver/v3/Makefile
+++ b/test/tools/vendor/github.com/Masterminds/semver/v3/Makefile
@@ -1,7 +1,5 @@
GOPATH=$(shell go env GOPATH)
GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint
-GOFUZZBUILD = $(GOPATH)/bin/go-fuzz-build
-GOFUZZ = $(GOPATH)/bin/go-fuzz
.PHONY: lint
lint: $(GOLANGCI_LINT)
@@ -19,19 +17,14 @@ test-cover:
GO111MODULE=on go test -cover .
.PHONY: fuzz
-fuzz: $(GOFUZZBUILD) $(GOFUZZ)
- @echo "==> Fuzz testing"
- $(GOFUZZBUILD)
- $(GOFUZZ) -workdir=_fuzz
+fuzz:
+ @echo "==> Running Fuzz Tests"
+ go test -fuzz=FuzzNewVersion -fuzztime=15s .
+ go test -fuzz=FuzzStrictNewVersion -fuzztime=15s .
+ go test -fuzz=FuzzNewConstraint -fuzztime=15s .
$(GOLANGCI_LINT):
# Install golangci-lint. The configuration for it is in the .golangci.yml
# file in the root of the repository
echo ${GOPATH}
curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1
-
-$(GOFUZZBUILD):
- cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz-build
-
-$(GOFUZZ):
- cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-dep
\ No newline at end of file
diff --git a/test/tools/vendor/github.com/Masterminds/semver/v3/README.md b/test/tools/vendor/github.com/Masterminds/semver/v3/README.md
index d8f54dcbd3c..eab8cac3b7f 100644
--- a/test/tools/vendor/github.com/Masterminds/semver/v3/README.md
+++ b/test/tools/vendor/github.com/Masterminds/semver/v3/README.md
@@ -18,18 +18,20 @@ If you are looking for a command line tool for version comparisons please see
## Package Versions
+Note, import `github.com/github.com/Masterminds/semver/v3` to use the latest version.
+
There are three major versions fo the `semver` package.
-* 3.x.x is the new stable and active version. This version is focused on constraint
+* 3.x.x is the stable and active version. This version is focused on constraint
compatibility for range handling in other tools from other languages. It has
a similar API to the v1 releases. The development of this version is on the master
branch. The documentation for this version is below.
* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are
no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer).
There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x).
-* 1.x.x is the most widely used version with numerous tagged releases. This is the
- previous stable and is still maintained for bug fixes. The development, to fix
- bugs, occurs on the release-1 branch. You can read the documentation [here](https://github.com/Masterminds/semver/blob/release-1/README.md).
+* 1.x.x is the original release. It is no longer maintained. You should use the
+ v3 release instead. You can read the documentation for the 1.x.x release
+ [here](https://github.com/Masterminds/semver/blob/release-1/README.md).
## Parsing Semantic Versions
@@ -242,3 +244,15 @@ for _, m := range msgs {
If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues)
or [create a pull request](https://github.com/Masterminds/semver/pulls).
+
+## Security
+
+Security is an important consideration for this project. The project currently
+uses the following tools to help discover security issues:
+
+* [CodeQL](https://github.com/Masterminds/semver)
+* [gosec](https://github.com/securego/gosec)
+* Daily Fuzz testing
+
+If you believe you have found a security vulnerability you can privately disclose
+it through the [GitHub security page](https://github.com/Masterminds/semver/security).
diff --git a/test/tools/vendor/github.com/Masterminds/semver/v3/SECURITY.md b/test/tools/vendor/github.com/Masterminds/semver/v3/SECURITY.md
new file mode 100644
index 00000000000..a30a66b1f74
--- /dev/null
+++ b/test/tools/vendor/github.com/Masterminds/semver/v3/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+## Supported Versions
+
+The following versions of semver are currently supported:
+
+| Version | Supported |
+| ------- | ------------------ |
+| 3.x | :white_check_mark: |
+| 2.x | :x: |
+| 1.x | :x: |
+
+Fixes are only released for the latest minor version in the form of a patch release.
+
+## Reporting a Vulnerability
+
+You can privately disclose a vulnerability through GitHubs
+[private vulnerability reporting](https://github.com/Masterminds/semver/security/advisories)
+mechanism.
diff --git a/test/tools/vendor/github.com/Masterminds/semver/v3/constraints.go b/test/tools/vendor/github.com/Masterminds/semver/v3/constraints.go
index 203072e4646..8461c7ed903 100644
--- a/test/tools/vendor/github.com/Masterminds/semver/v3/constraints.go
+++ b/test/tools/vendor/github.com/Masterminds/semver/v3/constraints.go
@@ -586,7 +586,7 @@ func rewriteRange(i string) string {
}
o := i
for _, v := range m {
- t := fmt.Sprintf(">= %s, <= %s", v[1], v[11])
+ t := fmt.Sprintf(">= %s, <= %s ", v[1], v[11])
o = strings.Replace(o, v[0], t, 1)
}
diff --git a/test/tools/vendor/github.com/Masterminds/semver/v3/fuzz.go b/test/tools/vendor/github.com/Masterminds/semver/v3/fuzz.go
deleted file mode 100644
index a242ad70587..00000000000
--- a/test/tools/vendor/github.com/Masterminds/semver/v3/fuzz.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// +build gofuzz
-
-package semver
-
-func Fuzz(data []byte) int {
- d := string(data)
-
- // Test NewVersion
- _, _ = NewVersion(d)
-
- // Test StrictNewVersion
- _, _ = StrictNewVersion(d)
-
- // Test NewConstraint
- _, _ = NewConstraint(d)
-
- // The return value should be 0 normally, 1 if the priority in future tests
- // should be increased, and -1 if future tests should skip passing in that
- // data. We do not have a reason to change priority so 0 is always returned.
- // There are example tests that do this.
- return 0
-}
diff --git a/test/tools/vendor/github.com/felixge/httpsnoop/.travis.yml b/test/tools/vendor/github.com/felixge/httpsnoop/.travis.yml
deleted file mode 100644
index bfc421200d0..00000000000
--- a/test/tools/vendor/github.com/felixge/httpsnoop/.travis.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-language: go
-
-go:
- - 1.6
- - 1.7
- - 1.8
diff --git a/test/tools/vendor/github.com/felixge/httpsnoop/Makefile b/test/tools/vendor/github.com/felixge/httpsnoop/Makefile
index 2d84889aed7..4e12afdd90d 100644
--- a/test/tools/vendor/github.com/felixge/httpsnoop/Makefile
+++ b/test/tools/vendor/github.com/felixge/httpsnoop/Makefile
@@ -1,7 +1,7 @@
.PHONY: ci generate clean
ci: clean generate
- go test -v ./...
+ go test -race -v ./...
generate:
go generate .
diff --git a/test/tools/vendor/github.com/felixge/httpsnoop/README.md b/test/tools/vendor/github.com/felixge/httpsnoop/README.md
index ddcecd13e73..cf6b42f3d77 100644
--- a/test/tools/vendor/github.com/felixge/httpsnoop/README.md
+++ b/test/tools/vendor/github.com/felixge/httpsnoop/README.md
@@ -7,8 +7,8 @@ http.Handlers.
Doing this requires non-trivial wrapping of the http.ResponseWriter interface,
which is also exposed for users interested in a more low-level API.
-[](https://godoc.org/github.com/felixge/httpsnoop)
-[](https://travis-ci.org/felixge/httpsnoop)
+[](https://pkg.go.dev/github.com/felixge/httpsnoop)
+[](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml)
## Usage Example
diff --git a/test/tools/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/test/tools/vendor/github.com/felixge/httpsnoop/capture_metrics.go
index b77cc7c0095..bec7b71b39c 100644
--- a/test/tools/vendor/github.com/felixge/httpsnoop/capture_metrics.go
+++ b/test/tools/vendor/github.com/felixge/httpsnoop/capture_metrics.go
@@ -52,7 +52,7 @@ func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWri
return func(code int) {
next(code)
- if !headerWritten {
+ if !(code >= 100 && code <= 199) && !headerWritten {
m.Code = code
headerWritten = true
}
diff --git a/test/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/test/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
index 31cbdfb8ef0..101cedde674 100644
--- a/test/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
+++ b/test/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
@@ -1,5 +1,5 @@
// +build go1.8
-// Code generated by "httpsnoop/codegen"; DO NOT EDIT
+// Code generated by "httpsnoop/codegen"; DO NOT EDIT.
package httpsnoop
diff --git a/test/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/test/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
index ab99c07c7a1..e0951df1527 100644
--- a/test/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
+++ b/test/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
@@ -1,5 +1,5 @@
// +build !go1.8
-// Code generated by "httpsnoop/codegen"; DO NOT EDIT
+// Code generated by "httpsnoop/codegen"; DO NOT EDIT.
package httpsnoop
diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/test/tools/vendor/github.com/fsnotify/fsnotify/.cirrus.yml
new file mode 100644
index 00000000000..ffc7b992b3c
--- /dev/null
+++ b/test/tools/vendor/github.com/fsnotify/fsnotify/.cirrus.yml
@@ -0,0 +1,13 @@
+freebsd_task:
+ name: 'FreeBSD'
+ freebsd_instance:
+ image_family: freebsd-13-2
+ install_script:
+ - pkg update -f
+ - pkg install -y go
+ test_script:
+ # run tests as user "cirrus" instead of root
+ - pw useradd cirrus -m
+ - chown -R cirrus:cirrus .
+ - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
+ - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/.gitignore b/test/tools/vendor/github.com/fsnotify/fsnotify/.gitignore
index 1d89d85ce4f..391cc076b12 100644
--- a/test/tools/vendor/github.com/fsnotify/fsnotify/.gitignore
+++ b/test/tools/vendor/github.com/fsnotify/fsnotify/.gitignore
@@ -4,3 +4,4 @@
# Output of go build ./cmd/fsnotify
/fsnotify
+/fsnotify.exe
diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/test/tools/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
index 77f9593bd58..e0e57575496 100644
--- a/test/tools/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
+++ b/test/tools/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
@@ -1,16 +1,87 @@
# Changelog
-All notable changes to this project will be documented in this file.
+Unreleased
+----------
+Nothing yet.
-The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
-and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+1.7.0 - 2023-10-22
+------------------
+This version of fsnotify needs Go 1.17.
-## [Unreleased]
+### Additions
-Nothing yet.
+- illumos: add FEN backend to support illumos and Solaris. ([#371])
+
+- all: add `NewBufferedWatcher()` to use a buffered channel, which can be useful
+ in cases where you can't control the kernel buffer and receive a large number
+ of events in bursts. ([#550], [#572])
+
+- all: add `AddWith()`, which is identical to `Add()` but allows passing
+ options. ([#521])
+
+- windows: allow setting the ReadDirectoryChangesW() buffer size with
+ `fsnotify.WithBufferSize()`; the default of 64K is the highest value that
+ works on all platforms and is enough for most purposes, but in some cases a
+ highest buffer is needed. ([#521])
+
+### Changes and fixes
+
+- inotify: remove watcher if a watched path is renamed ([#518])
+
+ After a rename the reported name wasn't updated, or even an empty string.
+ Inotify doesn't provide any good facilities to update it, so just remove the
+ watcher. This is already how it worked on kqueue and FEN.
+
+ On Windows this does work, and remains working.
+
+- windows: don't listen for file attribute changes ([#520])
+
+ File attribute changes are sent as `FILE_ACTION_MODIFIED` by the Windows API,
+ with no way to see if they're a file write or attribute change, so would show
+ up as a fsnotify.Write event. This is never useful, and could result in many
+ spurious Write events.
+
+- windows: return `ErrEventOverflow` if the buffer is full ([#525])
+
+ Before it would merely return "short read", making it hard to detect this
+ error.
+
+- kqueue: make sure events for all files are delivered properly when removing a
+ watched directory ([#526])
+
+ Previously they would get sent with `""` (empty string) or `"."` as the path
+ name.
+
+- kqueue: don't emit spurious Create events for symbolic links ([#524])
+
+ The link would get resolved but kqueue would "forget" it already saw the link
+ itself, resulting on a Create for every Write event for the directory.
+
+- all: return `ErrClosed` on `Add()` when the watcher is closed ([#516])
+
+- other: add `Watcher.Errors` and `Watcher.Events` to the no-op `Watcher` in
+ `backend_other.go`, making it easier to use on unsupported platforms such as
+ WASM, AIX, etc. ([#528])
+
+- other: use the `backend_other.go` no-op if the `appengine` build tag is set;
+ Google AppEngine forbids usage of the unsafe package so the inotify backend
+ won't compile there.
-## [1.6.0] - 2022-10-13
+[#371]: https://github.com/fsnotify/fsnotify/pull/371
+[#516]: https://github.com/fsnotify/fsnotify/pull/516
+[#518]: https://github.com/fsnotify/fsnotify/pull/518
+[#520]: https://github.com/fsnotify/fsnotify/pull/520
+[#521]: https://github.com/fsnotify/fsnotify/pull/521
+[#524]: https://github.com/fsnotify/fsnotify/pull/524
+[#525]: https://github.com/fsnotify/fsnotify/pull/525
+[#526]: https://github.com/fsnotify/fsnotify/pull/526
+[#528]: https://github.com/fsnotify/fsnotify/pull/528
+[#537]: https://github.com/fsnotify/fsnotify/pull/537
+[#550]: https://github.com/fsnotify/fsnotify/pull/550
+[#572]: https://github.com/fsnotify/fsnotify/pull/572
+1.6.0 - 2022-10-13
+------------------
This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1,
but not documented). It also increases the minimum Linux version to 2.6.32.
diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/README.md b/test/tools/vendor/github.com/fsnotify/fsnotify/README.md
index d4e6080feb2..e480733d16c 100644
--- a/test/tools/vendor/github.com/fsnotify/fsnotify/README.md
+++ b/test/tools/vendor/github.com/fsnotify/fsnotify/README.md
@@ -1,29 +1,31 @@
fsnotify is a Go library to provide cross-platform filesystem notifications on
-Windows, Linux, macOS, and BSD systems.
+Windows, Linux, macOS, BSD, and illumos.
-Go 1.16 or newer is required; the full documentation is at
+Go 1.17 or newer is required; the full documentation is at
https://pkg.go.dev/github.com/fsnotify/fsnotify
-**It's best to read the documentation at pkg.go.dev, as it's pinned to the last
-released version, whereas this README is for the last development version which
-may include additions/changes.**
-
---
Platform support:
-| Adapter | OS | Status |
-| --------------------- | ---------------| -------------------------------------------------------------|
-| inotify | Linux 2.6.32+ | Supported |
-| kqueue | BSD, macOS | Supported |
-| ReadDirectoryChangesW | Windows | Supported |
-| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) |
-| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) |
-| fanotify | Linux 5.9+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) |
-| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) |
-| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) |
-
-Linux and macOS should include Android and iOS, but these are currently untested.
+| Backend | OS | Status |
+| :-------------------- | :--------- | :------------------------------------------------------------------------ |
+| inotify | Linux | Supported |
+| kqueue | BSD, macOS | Supported |
+| ReadDirectoryChangesW | Windows | Supported |
+| FEN | illumos | Supported |
+| fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) |
+| AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment |
+| FSEvents | macOS | [Needs support in x/sys/unix][fsevents] |
+| USN Journals | Windows | [Needs support in x/sys/windows][usn] |
+| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) |
+
+Linux and illumos should include Android and Solaris, but these are currently
+untested.
+
+[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120
+[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847
+[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129
Usage
-----
@@ -83,20 +85,23 @@ run with:
% go run ./cmd/fsnotify
+Further detailed documentation can be found in godoc:
+https://pkg.go.dev/github.com/fsnotify/fsnotify
+
FAQ
---
### Will a file still be watched when it's moved to another directory?
No, not unless you are watching the location it was moved to.
-### Are subdirectories watched too?
+### Are subdirectories watched?
No, you must add watches for any directory you want to watch (a recursive
watcher is on the roadmap: [#18]).
[#18]: https://github.com/fsnotify/fsnotify/issues/18
### Do I have to watch the Error and Event channels in a goroutine?
-As of now, yes (you can read both channels in the same goroutine using `select`,
-you don't need a separate goroutine for both channels; see the example).
+Yes. You can read both channels in the same goroutine using `select` (you don't
+need a separate goroutine for both channels; see the example).
### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys?
fsnotify requires support from underlying OS to work. The current NFS and SMB
@@ -107,6 +112,32 @@ This could be fixed with a polling watcher ([#9]), but it's not yet implemented.
[#9]: https://github.com/fsnotify/fsnotify/issues/9
+### Why do I get many Chmod events?
+Some programs may generate a lot of attribute changes; for example Spotlight on
+macOS, anti-virus programs, backup applications, and some others are known to do
+this. As a rule, it's typically best to ignore Chmod events. They're often not
+useful, and tend to cause problems.
+
+Spotlight indexing on macOS can result in multiple events (see [#15]). A
+temporary workaround is to add your folder(s) to the *Spotlight Privacy
+settings* until we have a native FSEvents implementation (see [#11]).
+
+[#11]: https://github.com/fsnotify/fsnotify/issues/11
+[#15]: https://github.com/fsnotify/fsnotify/issues/15
+
+### Watching a file doesn't work well
+Watching individual files (rather than directories) is generally not recommended
+as many programs (especially editors) update files atomically: it will write to
+a temporary file which is then moved to to destination, overwriting the original
+(or some variant thereof). The watcher on the original file is now lost, as that
+no longer exists.
+
+The upshot of this is that a power failure or crash won't leave a half-written
+file.
+
+Watch the parent directory and use `Event.Name` to filter out files you're not
+interested in. There is an example of this in `cmd/fsnotify/file.go`.
+
Platform-specific notes
-----------------------
### Linux
@@ -151,11 +182,3 @@ these platforms.
The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to
control the maximum number of open files.
-
-### macOS
-Spotlight indexing on macOS can result in multiple events (see [#15]). A temporary
-workaround is to add your folder(s) to the *Spotlight Privacy settings* until we
-have a native FSEvents implementation (see [#11]).
-
-[#11]: https://github.com/fsnotify/fsnotify/issues/11
-[#15]: https://github.com/fsnotify/fsnotify/issues/15
diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/test/tools/vendor/github.com/fsnotify/fsnotify/backend_fen.go
index 1a95ad8e7ce..28497f1dd8e 100644
--- a/test/tools/vendor/github.com/fsnotify/fsnotify/backend_fen.go
+++ b/test/tools/vendor/github.com/fsnotify/fsnotify/backend_fen.go
@@ -1,10 +1,19 @@
//go:build solaris
// +build solaris
+// Note: the documentation on the Watcher type and methods is generated from
+// mkdoc.zsh
+
package fsnotify
import (
"errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "golang.org/x/sys/unix"
)
// Watcher watches a set of paths, delivering events on a channel.
@@ -17,9 +26,9 @@ import (
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
-// fp := os.Open("file")
-// os.Remove("file") // Triggers Chmod
-// fp.Close() // Triggers Remove
+// fp := os.Open("file")
+// os.Remove("file") // Triggers Chmod
+// fp.Close() // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
@@ -33,16 +42,16 @@ import (
//
// To increase them you can use sysctl or write the value to the /proc file:
//
-// # Default values on Linux 5.18
-// sysctl fs.inotify.max_user_watches=124983
-// sysctl fs.inotify.max_user_instances=128
+// # Default values on Linux 5.18
+// sysctl fs.inotify.max_user_watches=124983
+// sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
-// fs.inotify.max_user_watches=124983
-// fs.inotify.max_user_instances=128
+// fs.inotify.max_user_watches=124983
+// fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
@@ -58,14 +67,20 @@ import (
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
-// # macOS notes
+// # Windows notes
+//
+// Paths can be added as "C:\path\to\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
//
-// Spotlight indexing on macOS can result in multiple events (see [#15]). A
-// temporary workaround is to add your folder(s) to the "Spotlight Privacy
-// Settings" until we have a native FSEvents implementation (see [#11]).
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all times, sometimes it will send no
+// events, and often only for some files.
//
-// [#11]: https://github.com/fsnotify/fsnotify/issues/11
-// [#15]: https://github.com/fsnotify/fsnotify/issues/15
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems. If you have many
+// events in quick succession this may not be enough, and you will have to use
+// [WithBufferSize] to increase the value.
type Watcher struct {
// Events sends the filesystem change events.
//
@@ -92,44 +107,129 @@ type Watcher struct {
// initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program
- // you may get hundreds of Write events, so you
- // probably want to wait until you've stopped receiving
- // them (see the dedup example in cmd/fsnotify).
+ // you may get hundreds of Write events, and you may
+ // want to wait until you've stopped receiving them
+ // (see the dedup example in cmd/fsnotify).
+ //
+ // Some systems may send Write event for directories
+ // when the directory content changes.
//
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent
- // and on kqueue when a file is truncated. On Windows
- // it's never sent.
+ // when a file is truncated. On Windows it's never
+ // sent.
Events chan Event
// Errors sends any errors.
+ //
+ // ErrEventOverflow is used to indicate there are too many events:
+ //
+ // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
+ // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
+ // - kqueue, fen: Not used.
Errors chan error
+
+ mu sync.Mutex
+ port *unix.EventPort
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
+ dirs map[string]struct{} // Explicitly watched directories
+ watches map[string]struct{} // Explicitly watched non-directories
}
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
- return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
+ return NewBufferedWatcher(0)
}
-// Close removes all watches and closes the events channel.
+// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
+// channel.
+//
+// The main use case for this is situations with a very large number of events
+// where the kernel buffer size can't be increased (e.g. due to lack of
+// permissions). An unbuffered Watcher will perform better for almost all use
+// cases, and whenever possible you will be better off increasing the kernel
+// buffers instead of adding a large userspace buffer.
+func NewBufferedWatcher(sz uint) (*Watcher, error) {
+ w := &Watcher{
+ Events: make(chan Event, sz),
+ Errors: make(chan error),
+ dirs: make(map[string]struct{}),
+ watches: make(map[string]struct{}),
+ done: make(chan struct{}),
+ }
+
+ var err error
+ w.port, err = unix.NewEventPort()
+ if err != nil {
+ return nil, fmt.Errorf("fsnotify.NewWatcher: %w", err)
+ }
+
+ go w.readEvents()
+ return w, nil
+}
+
+// sendEvent attempts to send an event to the user, returning true if the event
+// was put in the channel successfully and false if the watcher has been closed.
+func (w *Watcher) sendEvent(name string, op Op) (sent bool) {
+ select {
+ case w.Events <- Event{Name: name, Op: op}:
+ return true
+ case <-w.done:
+ return false
+ }
+}
+
+// sendError attempts to send an error to the user, returning true if the error
+// was put in the channel successfully and false if the watcher has been closed.
+func (w *Watcher) sendError(err error) (sent bool) {
+ select {
+ case w.Errors <- err:
+ return true
+ case <-w.done:
+ return false
+ }
+}
+
+func (w *Watcher) isClosed() bool {
+ select {
+ case <-w.done:
+ return true
+ default:
+ return false
+ }
+}
+
+// Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error {
- return nil
+ // Take the lock used by associateFile to prevent lingering events from
+ // being processed after the close
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if w.isClosed() {
+ return nil
+ }
+ close(w.done)
+ return w.port.Close()
}
// Add starts monitoring the path for changes.
//
-// A path can only be watched once; attempting to watch it more than once will
-// return an error. Paths that do not yet exist on the filesystem cannot be
-// added. A watch will be automatically removed if the path is deleted.
+// A path can only be watched once; watching it more than once is a no-op and will
+// not return an error. Paths that do not yet exist on the filesystem cannot be
+// watched.
//
-// A path will remain watched if it gets renamed to somewhere else on the same
-// filesystem, but the monitor will get removed if the path gets deleted and
-// re-created, or if it's moved to a different filesystem.
+// A watch will be automatically removed if the watched path is deleted or
+// renamed. The exception is the Windows backend, which doesn't remove the
+// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
+// Returns [ErrClosed] if [Watcher.Close] was called.
+//
+// See [Watcher.AddWith] for a version that allows adding options.
+//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
@@ -139,15 +239,63 @@ func (w *Watcher) Close() error {
// # Watching files
//
// Watching individual files (rather than directories) is generally not
-// recommended as many tools update files atomically. Instead of "just" writing
-// to the file a temporary file will be written to first, and if successful the
-// temporary file is moved to to destination removing the original, or some
-// variant thereof. The watcher on the original file is now lost, as it no
-// longer exists.
-//
-// Instead, watch the parent directory and use Event.Name to filter out files
-// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
-func (w *Watcher) Add(name string) error {
+// recommended as many programs (especially editors) update files atomically: it
+// will write to a temporary file which is then moved to to destination,
+// overwriting the original (or some variant thereof). The watcher on the
+// original file is now lost, as that no longer exists.
+//
+// The upshot of this is that a power failure or crash won't leave a
+// half-written file.
+//
+// Watch the parent directory and use Event.Name to filter out files you're not
+// interested in. There is an example of this in cmd/fsnotify/file.go.
+func (w *Watcher) Add(name string) error { return w.AddWith(name) }
+
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+// other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(name string, opts ...addOpt) error {
+ if w.isClosed() {
+ return ErrClosed
+ }
+ if w.port.PathIsWatched(name) {
+ return nil
+ }
+
+ _ = getOptions(opts...)
+
+ // Currently we resolve symlinks that were explicitly requested to be
+ // watched. Otherwise we would use LStat here.
+ stat, err := os.Stat(name)
+ if err != nil {
+ return err
+ }
+
+ // Associate all files in the directory.
+ if stat.IsDir() {
+ err := w.handleDirectory(name, stat, true, w.associateFile)
+ if err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ w.dirs[name] = struct{}{}
+ w.mu.Unlock()
+ return nil
+ }
+
+ err = w.associateFile(name, stat, true)
+ if err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ w.watches[name] = struct{}{}
+ w.mu.Unlock()
return nil
}
@@ -157,6 +305,336 @@ func (w *Watcher) Add(name string) error {
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+//
+// Returns nil if [Watcher.Close] was called.
func (w *Watcher) Remove(name string) error {
+ if w.isClosed() {
+ return nil
+ }
+ if !w.port.PathIsWatched(name) {
+ return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
+ }
+
+ // The user has expressed an intent. Immediately remove this name from
+ // whichever watch list it might be in. If it's not in there the delete
+ // doesn't cause harm.
+ w.mu.Lock()
+ delete(w.watches, name)
+ delete(w.dirs, name)
+ w.mu.Unlock()
+
+ stat, err := os.Stat(name)
+ if err != nil {
+ return err
+ }
+
+ // Remove associations for every file in the directory.
+ if stat.IsDir() {
+ err := w.handleDirectory(name, stat, false, w.dissociateFile)
+ if err != nil {
+ return err
+ }
+ return nil
+ }
+
+ err = w.port.DissociatePath(name)
+ if err != nil {
+ return err
+ }
+
return nil
}
+
+// readEvents contains the main loop that runs in a goroutine watching for events.
+func (w *Watcher) readEvents() {
+ // If this function returns, the watcher has been closed and we can close
+ // these channels
+ defer func() {
+ close(w.Errors)
+ close(w.Events)
+ }()
+
+ pevents := make([]unix.PortEvent, 8)
+ for {
+ count, err := w.port.Get(pevents, 1, nil)
+ if err != nil && err != unix.ETIME {
+ // Interrupted system call (count should be 0) ignore and continue
+ if errors.Is(err, unix.EINTR) && count == 0 {
+ continue
+ }
+ // Get failed because we called w.Close()
+ if errors.Is(err, unix.EBADF) && w.isClosed() {
+ return
+ }
+ // There was an error not caused by calling w.Close()
+ if !w.sendError(err) {
+ return
+ }
+ }
+
+ p := pevents[:count]
+ for _, pevent := range p {
+ if pevent.Source != unix.PORT_SOURCE_FILE {
+ // Event from unexpected source received; should never happen.
+ if !w.sendError(errors.New("Event from unexpected source received")) {
+ return
+ }
+ continue
+ }
+
+ err = w.handleEvent(&pevent)
+ if err != nil {
+ if !w.sendError(err) {
+ return
+ }
+ }
+ }
+ }
+}
+
+func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
+ files, err := os.ReadDir(path)
+ if err != nil {
+ return err
+ }
+
+ // Handle all children of the directory.
+ for _, entry := range files {
+ finfo, err := entry.Info()
+ if err != nil {
+ return err
+ }
+ err = handler(filepath.Join(path, finfo.Name()), finfo, false)
+ if err != nil {
+ return err
+ }
+ }
+
+ // And finally handle the directory itself.
+ return handler(path, stat, follow)
+}
+
+// handleEvent might need to emit more than one fsnotify event if the events
+// bitmap matches more than one event type (e.g. the file was both modified and
+// had the attributes changed between when the association was created and the
+// when event was returned)
+func (w *Watcher) handleEvent(event *unix.PortEvent) error {
+ var (
+ events = event.Events
+ path = event.Path
+ fmode = event.Cookie.(os.FileMode)
+ reRegister = true
+ )
+
+ w.mu.Lock()
+ _, watchedDir := w.dirs[path]
+ _, watchedPath := w.watches[path]
+ w.mu.Unlock()
+ isWatched := watchedDir || watchedPath
+
+ if events&unix.FILE_DELETE != 0 {
+ if !w.sendEvent(path, Remove) {
+ return nil
+ }
+ reRegister = false
+ }
+ if events&unix.FILE_RENAME_FROM != 0 {
+ if !w.sendEvent(path, Rename) {
+ return nil
+ }
+ // Don't keep watching the new file name
+ reRegister = false
+ }
+ if events&unix.FILE_RENAME_TO != 0 {
+ // We don't report a Rename event for this case, because Rename events
+ // are interpreted as referring to the _old_ name of the file, and in
+ // this case the event would refer to the new name of the file. This
+ // type of rename event is not supported by fsnotify.
+
+ // inotify reports a Remove event in this case, so we simulate this
+ // here.
+ if !w.sendEvent(path, Remove) {
+ return nil
+ }
+ // Don't keep watching the file that was removed
+ reRegister = false
+ }
+
+ // The file is gone, nothing left to do.
+ if !reRegister {
+ if watchedDir {
+ w.mu.Lock()
+ delete(w.dirs, path)
+ w.mu.Unlock()
+ }
+ if watchedPath {
+ w.mu.Lock()
+ delete(w.watches, path)
+ w.mu.Unlock()
+ }
+ return nil
+ }
+
+ // If we didn't get a deletion the file still exists and we're going to have
+ // to watch it again. Let's Stat it now so that we can compare permissions
+ // and have what we need to continue watching the file
+
+ stat, err := os.Lstat(path)
+ if err != nil {
+ // This is unexpected, but we should still emit an event. This happens
+ // most often on "rm -r" of a subdirectory inside a watched directory We
+ // get a modify event of something happening inside, but by the time we
+ // get here, the sudirectory is already gone. Clearly we were watching
+ // this path but now it is gone. Let's tell the user that it was
+ // removed.
+ if !w.sendEvent(path, Remove) {
+ return nil
+ }
+ // Suppress extra write events on removed directories; they are not
+ // informative and can be confusing.
+ return nil
+ }
+
+ // resolve symlinks that were explicitly watched as we would have at Add()
+ // time. this helps suppress spurious Chmod events on watched symlinks
+ if isWatched {
+ stat, err = os.Stat(path)
+ if err != nil {
+ // The symlink still exists, but the target is gone. Report the
+ // Remove similar to above.
+ if !w.sendEvent(path, Remove) {
+ return nil
+ }
+ // Don't return the error
+ }
+ }
+
+ if events&unix.FILE_MODIFIED != 0 {
+ if fmode.IsDir() {
+ if watchedDir {
+ if err := w.updateDirectory(path); err != nil {
+ return err
+ }
+ } else {
+ if !w.sendEvent(path, Write) {
+ return nil
+ }
+ }
+ } else {
+ if !w.sendEvent(path, Write) {
+ return nil
+ }
+ }
+ }
+ if events&unix.FILE_ATTRIB != 0 && stat != nil {
+ // Only send Chmod if perms changed
+ if stat.Mode().Perm() != fmode.Perm() {
+ if !w.sendEvent(path, Chmod) {
+ return nil
+ }
+ }
+ }
+
+ if stat != nil {
+ // If we get here, it means we've hit an event above that requires us to
+ // continue watching the file or directory
+ return w.associateFile(path, stat, isWatched)
+ }
+ return nil
+}
+
+func (w *Watcher) updateDirectory(path string) error {
+ // The directory was modified, so we must find unwatched entities and watch
+ // them. If something was removed from the directory, nothing will happen,
+ // as everything else should still be watched.
+ files, err := os.ReadDir(path)
+ if err != nil {
+ return err
+ }
+
+ for _, entry := range files {
+ path := filepath.Join(path, entry.Name())
+ if w.port.PathIsWatched(path) {
+ continue
+ }
+
+ finfo, err := entry.Info()
+ if err != nil {
+ return err
+ }
+ err = w.associateFile(path, finfo, false)
+ if err != nil {
+ if !w.sendError(err) {
+ return nil
+ }
+ }
+ if !w.sendEvent(path, Create) {
+ return nil
+ }
+ }
+ return nil
+}
+
+func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error {
+ if w.isClosed() {
+ return ErrClosed
+ }
+ // This is primarily protecting the call to AssociatePath but it is
+ // important and intentional that the call to PathIsWatched is also
+ // protected by this mutex. Without this mutex, AssociatePath has been seen
+ // to error out that the path is already associated.
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if w.port.PathIsWatched(path) {
+ // Remove the old association in favor of this one If we get ENOENT,
+ // then while the x/sys/unix wrapper still thought that this path was
+ // associated, the underlying event port did not. This call will have
+ // cleared up that discrepancy. The most likely cause is that the event
+ // has fired but we haven't processed it yet.
+ err := w.port.DissociatePath(path)
+ if err != nil && err != unix.ENOENT {
+ return err
+ }
+ }
+ // FILE_NOFOLLOW means we watch symlinks themselves rather than their
+ // targets.
+ events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW
+ if follow {
+ // We *DO* follow symlinks for explicitly watched entries.
+ events = unix.FILE_MODIFIED | unix.FILE_ATTRIB
+ }
+ return w.port.AssociatePath(path, stat,
+ events,
+ stat.Mode())
+}
+
+func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error {
+ if !w.port.PathIsWatched(path) {
+ return nil
+ }
+ return w.port.DissociatePath(path)
+}
+
+// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
+// yet removed).
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) WatchList() []string {
+ if w.isClosed() {
+ return nil
+ }
+
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ entries := make([]string, 0, len(w.watches)+len(w.dirs))
+ for pathname := range w.dirs {
+ entries = append(entries, pathname)
+ }
+ for pathname := range w.watches {
+ entries = append(entries, pathname)
+ }
+
+ return entries
+}
diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/test/tools/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
index 54c77fbb0ee..921c1c1e401 100644
--- a/test/tools/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
+++ b/test/tools/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
@@ -1,5 +1,8 @@
-//go:build linux
-// +build linux
+//go:build linux && !appengine
+// +build linux,!appengine
+
+// Note: the documentation on the Watcher type and methods is generated from
+// mkdoc.zsh
package fsnotify
@@ -26,9 +29,9 @@ import (
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
-// fp := os.Open("file")
-// os.Remove("file") // Triggers Chmod
-// fp.Close() // Triggers Remove
+// fp := os.Open("file")
+// os.Remove("file") // Triggers Chmod
+// fp.Close() // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
@@ -42,16 +45,16 @@ import (
//
// To increase them you can use sysctl or write the value to the /proc file:
//
-// # Default values on Linux 5.18
-// sysctl fs.inotify.max_user_watches=124983
-// sysctl fs.inotify.max_user_instances=128
+// # Default values on Linux 5.18
+// sysctl fs.inotify.max_user_watches=124983
+// sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
-// fs.inotify.max_user_watches=124983
-// fs.inotify.max_user_instances=128
+// fs.inotify.max_user_watches=124983
+// fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
@@ -67,14 +70,20 @@ import (
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
-// # macOS notes
+// # Windows notes
+//
+// Paths can be added as "C:\path\to\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
//
-// Spotlight indexing on macOS can result in multiple events (see [#15]). A
-// temporary workaround is to add your folder(s) to the "Spotlight Privacy
-// Settings" until we have a native FSEvents implementation (see [#11]).
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all times, sometimes it will send no
+// events, and often only for some files.
//
-// [#11]: https://github.com/fsnotify/fsnotify/issues/11
-// [#15]: https://github.com/fsnotify/fsnotify/issues/15
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems. If you have many
+// events in quick succession this may not be enough, and you will have to use
+// [WithBufferSize] to increase the value.
type Watcher struct {
// Events sends the filesystem change events.
//
@@ -101,36 +110,148 @@ type Watcher struct {
// initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program
- // you may get hundreds of Write events, so you
- // probably want to wait until you've stopped receiving
- // them (see the dedup example in cmd/fsnotify).
+ // you may get hundreds of Write events, and you may
+ // want to wait until you've stopped receiving them
+ // (see the dedup example in cmd/fsnotify).
+ //
+	//	// Some systems may send a Write event for directories
+	//	// when the directory content changes.
//
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent
- // and on kqueue when a file is truncated. On Windows
- // it's never sent.
+ // when a file is truncated. On Windows it's never
+ // sent.
Events chan Event
// Errors sends any errors.
+ //
+ // ErrEventOverflow is used to indicate there are too many events:
+ //
+ // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
+ // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
+ // - kqueue, fen: Not used.
Errors chan error
// Store fd here as os.File.Read() will no longer return on close after
// calling Fd(). See: https://github.com/golang/go/issues/26439
fd int
- mu sync.Mutex // Map access
inotifyFile *os.File
- watches map[string]*watch // Map of inotify watches (key: path)
- paths map[int]string // Map of watched paths (key: watch descriptor)
- done chan struct{} // Channel for sending a "quit message" to the reader goroutine
- doneResp chan struct{} // Channel to respond to Close
+ watches *watches
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
+ closeMu sync.Mutex
+ doneResp chan struct{} // Channel to respond to Close
+}
+
+type (
+ watches struct {
+ mu sync.RWMutex
+ wd map[uint32]*watch // wd → watch
+ path map[string]uint32 // pathname → wd
+ }
+ watch struct {
+ wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+ flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+ path string // Watch path.
+ }
+)
+
+func newWatches() *watches {
+ return &watches{
+ wd: make(map[uint32]*watch),
+ path: make(map[string]uint32),
+ }
+}
+
+func (w *watches) len() int {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+ return len(w.wd)
+}
+
+func (w *watches) add(ww *watch) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ w.wd[ww.wd] = ww
+ w.path[ww.path] = ww.wd
+}
+
+func (w *watches) remove(wd uint32) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ delete(w.path, w.wd[wd].path)
+ delete(w.wd, wd)
+}
+
+func (w *watches) removePath(path string) (uint32, bool) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ wd, ok := w.path[path]
+ if !ok {
+ return 0, false
+ }
+
+ delete(w.path, path)
+ delete(w.wd, wd)
+
+ return wd, true
+}
+
+func (w *watches) byPath(path string) *watch {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+ return w.wd[w.path[path]]
+}
+
+func (w *watches) byWd(wd uint32) *watch {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+ return w.wd[wd]
+}
+
+func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ var existing *watch
+ wd, ok := w.path[path]
+ if ok {
+ existing = w.wd[wd]
+ }
+
+ upd, err := f(existing)
+ if err != nil {
+ return err
+ }
+ if upd != nil {
+ w.wd[upd.wd] = upd
+ w.path[upd.path] = upd.wd
+
+ if upd.wd != wd {
+ delete(w.wd, wd)
+ }
+ }
+
+ return nil
}
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
- // Create inotify fd
- // Need to set the FD to nonblocking mode in order for SetDeadline methods to work
- // Otherwise, blocking i/o operations won't terminate on close
+ return NewBufferedWatcher(0)
+}
+
+// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
+// channel.
+//
+// The main use case for this is situations with a very large number of events
+// where the kernel buffer size can't be increased (e.g. due to lack of
+// permissions). An unbuffered Watcher will perform better for almost all use
+// cases, and whenever possible you will be better off increasing the kernel
+// buffers instead of adding a large userspace buffer.
+func NewBufferedWatcher(sz uint) (*Watcher, error) {
+ // Need to set nonblocking mode for SetDeadline to work, otherwise blocking
+ // I/O operations won't terminate on close.
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
if fd == -1 {
return nil, errno
@@ -139,9 +260,8 @@ func NewWatcher() (*Watcher, error) {
w := &Watcher{
fd: fd,
inotifyFile: os.NewFile(uintptr(fd), ""),
- watches: make(map[string]*watch),
- paths: make(map[int]string),
- Events: make(chan Event),
+ watches: newWatches(),
+ Events: make(chan Event, sz),
Errors: make(chan error),
done: make(chan struct{}),
doneResp: make(chan struct{}),
@@ -157,8 +277,8 @@ func (w *Watcher) sendEvent(e Event) bool {
case w.Events <- e:
return true
case <-w.done:
+ return false
}
- return false
}
// Returns true if the error was sent, or false if watcher is closed.
@@ -180,17 +300,15 @@ func (w *Watcher) isClosed() bool {
}
}
-// Close removes all watches and closes the events channel.
+// Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error {
- w.mu.Lock()
+ w.closeMu.Lock()
if w.isClosed() {
- w.mu.Unlock()
+ w.closeMu.Unlock()
return nil
}
-
- // Send 'close' signal to goroutine, and set the Watcher to closed.
close(w.done)
- w.mu.Unlock()
+ w.closeMu.Unlock()
// Causes any blocking reads to return with an error, provided the file
// still supports deadline operations.
@@ -207,17 +325,21 @@ func (w *Watcher) Close() error {
// Add starts monitoring the path for changes.
//
-// A path can only be watched once; attempting to watch it more than once will
-// return an error. Paths that do not yet exist on the filesystem cannot be
-// added. A watch will be automatically removed if the path is deleted.
+// A path can only be watched once; watching it more than once is a no-op and will
+// not return an error. Paths that do not yet exist on the filesystem cannot be
+// watched.
//
-// A path will remain watched if it gets renamed to somewhere else on the same
-// filesystem, but the monitor will get removed if the path gets deleted and
-// re-created, or if it's moved to a different filesystem.
+// A watch will be automatically removed if the watched path is deleted or
+// renamed. The exception is the Windows backend, which doesn't remove the
+// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
+// Returns [ErrClosed] if [Watcher.Close] was called.
+//
+// See [Watcher.AddWith] for a version that allows adding options.
+//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
@@ -227,44 +349,59 @@ func (w *Watcher) Close() error {
// # Watching files
//
// Watching individual files (rather than directories) is generally not
-// recommended as many tools update files atomically. Instead of "just" writing
-// to the file a temporary file will be written to first, and if successful the
-// temporary file is moved to to destination removing the original, or some
-// variant thereof. The watcher on the original file is now lost, as it no
-// longer exists.
-//
-// Instead, watch the parent directory and use Event.Name to filter out files
-// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
-func (w *Watcher) Add(name string) error {
- name = filepath.Clean(name)
+// recommended as many programs (especially editors) update files atomically: it
+// will write to a temporary file which is then moved to the destination,
+// overwriting the original (or some variant thereof). The watcher on the
+// original file is now lost, as that no longer exists.
+//
+// The upshot of this is that a power failure or crash won't leave a
+// half-written file.
+//
+// Watch the parent directory and use Event.Name to filter out files you're not
+// interested in. There is an example of this in cmd/fsnotify/file.go.
+func (w *Watcher) Add(name string) error { return w.AddWith(name) }
+
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+// other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(name string, opts ...addOpt) error {
if w.isClosed() {
- return errors.New("inotify instance already closed")
+ return ErrClosed
}
+ name = filepath.Clean(name)
+ _ = getOptions(opts...)
+
var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
- w.mu.Lock()
- defer w.mu.Unlock()
- watchEntry := w.watches[name]
- if watchEntry != nil {
- flags |= watchEntry.flags | unix.IN_MASK_ADD
- }
- wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
- if wd == -1 {
- return errno
- }
+ return w.watches.updatePath(name, func(existing *watch) (*watch, error) {
+ if existing != nil {
+ flags |= existing.flags | unix.IN_MASK_ADD
+ }
- if watchEntry == nil {
- w.watches[name] = &watch{wd: uint32(wd), flags: flags}
- w.paths[wd] = name
- } else {
- watchEntry.wd = uint32(wd)
- watchEntry.flags = flags
- }
+ wd, err := unix.InotifyAddWatch(w.fd, name, flags)
+ if wd == -1 {
+ return nil, err
+ }
- return nil
+ if existing == nil {
+ return &watch{
+ wd: uint32(wd),
+ path: name,
+ flags: flags,
+ }, nil
+ }
+
+ existing.wd = uint32(wd)
+ existing.flags = flags
+ return existing, nil
+ })
}
// Remove stops monitoring the path for changes.
@@ -273,32 +410,22 @@ func (w *Watcher) Add(name string) error {
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+//
+// Returns nil if [Watcher.Close] was called.
func (w *Watcher) Remove(name string) error {
- name = filepath.Clean(name)
-
- // Fetch the watch.
- w.mu.Lock()
- defer w.mu.Unlock()
- watch, ok := w.watches[name]
+ if w.isClosed() {
+ return nil
+ }
+ return w.remove(filepath.Clean(name))
+}
- // Remove it from inotify.
+func (w *Watcher) remove(name string) error {
+ wd, ok := w.watches.removePath(name)
if !ok {
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
}
- // We successfully removed the watch if InotifyRmWatch doesn't return an
- // error, we need to clean up our internal state to ensure it matches
- // inotify's kernel state.
- delete(w.paths, int(watch.wd))
- delete(w.watches, name)
-
- // inotify_rm_watch will return EINVAL if the file has been deleted;
- // the inotify will already have been removed.
- // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously
- // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
- // so that EINVAL means that the wd is being rm_watch()ed or its file removed
- // by another thread and we have not received IN_IGNORE event.
- success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
+ success, errno := unix.InotifyRmWatch(w.fd, wd)
if success == -1 {
// TODO: Perhaps it's not helpful to return an error here in every case;
// The only two possible errors are:
@@ -312,28 +439,28 @@ func (w *Watcher) Remove(name string) error {
// are watching is deleted.
return errno
}
-
return nil
}
-// WatchList returns all paths added with [Add] (and are not yet removed).
+// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
+// yet removed).
+//
+// Returns nil if [Watcher.Close] was called.
func (w *Watcher) WatchList() []string {
- w.mu.Lock()
- defer w.mu.Unlock()
+ if w.isClosed() {
+ return nil
+ }
- entries := make([]string, 0, len(w.watches))
- for pathname := range w.watches {
+ entries := make([]string, 0, w.watches.len())
+ w.watches.mu.RLock()
+ for pathname := range w.watches.path {
entries = append(entries, pathname)
}
+ w.watches.mu.RUnlock()
return entries
}
-type watch struct {
- wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
- flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
-}
-
// readEvents reads from the inotify file descriptor, converts the
// received events into Event objects and sends them via the Events channel
func (w *Watcher) readEvents() {
@@ -367,14 +494,11 @@ func (w *Watcher) readEvents() {
if n < unix.SizeofInotifyEvent {
var err error
if n == 0 {
- // If EOF is received. This should really never happen.
- err = io.EOF
+ err = io.EOF // If EOF is received. This should really never happen.
} else if n < 0 {
- // If an error occurred while reading.
- err = errno
+ err = errno // If an error occurred while reading.
} else {
- // Read was too short.
- err = errors.New("notify: short read in readEvents()")
+ err = errors.New("notify: short read in readEvents()") // Read was too short.
}
if !w.sendError(err) {
return
@@ -403,18 +527,29 @@ func (w *Watcher) readEvents() {
// doesn't append the filename to the event, but we would like to always fill the
// the "Name" field with a valid filename. We retrieve the path of the watch from
// the "paths" map.
- w.mu.Lock()
- name, ok := w.paths[int(raw.Wd)]
- // IN_DELETE_SELF occurs when the file/directory being watched is removed.
- // This is a sign to clean up the maps, otherwise we are no longer in sync
- // with the inotify kernel state which has already deleted the watch
- // automatically.
- if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
- delete(w.paths, int(raw.Wd))
- delete(w.watches, name)
+ watch := w.watches.byWd(uint32(raw.Wd))
+
+ // inotify will automatically remove the watch on deletes; just need
+ // to clean our state here.
+ if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+ w.watches.remove(watch.wd)
+ }
+ // We can't really update the state when a watched path is moved;
+ // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove
+ // the watch.
+ if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
+ err := w.remove(watch.path)
+ if err != nil && !errors.Is(err, ErrNonExistentWatch) {
+ if !w.sendError(err) {
+ return
+ }
+ }
}
- w.mu.Unlock()
+ var name string
+ if watch != nil {
+ name = watch.path
+ }
if nameLen > 0 {
// Point "bytes" at the first byte of the filename
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/test/tools/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
index 29087469bf8..063a0915a07 100644
--- a/test/tools/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
+++ b/test/tools/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
@@ -1,12 +1,14 @@
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
// +build freebsd openbsd netbsd dragonfly darwin
+// Note: the documentation on the Watcher type and methods is generated from
+// mkdoc.zsh
+
package fsnotify
import (
"errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"sync"
@@ -24,9 +26,9 @@ import (
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
-// fp := os.Open("file")
-// os.Remove("file") // Triggers Chmod
-// fp.Close() // Triggers Remove
+// fp := os.Open("file")
+// os.Remove("file") // Triggers Chmod
+// fp.Close() // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
@@ -40,16 +42,16 @@ import (
//
// To increase them you can use sysctl or write the value to the /proc file:
//
-// # Default values on Linux 5.18
-// sysctl fs.inotify.max_user_watches=124983
-// sysctl fs.inotify.max_user_instances=128
+// # Default values on Linux 5.18
+// sysctl fs.inotify.max_user_watches=124983
+// sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
-// fs.inotify.max_user_watches=124983
-// fs.inotify.max_user_instances=128
+// fs.inotify.max_user_watches=124983
+// fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
@@ -65,14 +67,20 @@ import (
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
-// # macOS notes
+// # Windows notes
+//
+// Paths can be added as "C:\path\to\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
//
-// Spotlight indexing on macOS can result in multiple events (see [#15]). A
-// temporary workaround is to add your folder(s) to the "Spotlight Privacy
-// Settings" until we have a native FSEvents implementation (see [#11]).
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all files, sometimes it will send no
+// events, and often only for some files.
//
-// [#11]: https://github.com/fsnotify/fsnotify/issues/11
-// [#15]: https://github.com/fsnotify/fsnotify/issues/15
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems. If you have many
+// events in quick succession this may not be enough, and you will have to use
+// [WithBufferSize] to increase the value.
type Watcher struct {
// Events sends the filesystem change events.
//
@@ -99,18 +107,27 @@ type Watcher struct {
// initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program
- // you may get hundreds of Write events, so you
- // probably want to wait until you've stopped receiving
- // them (see the dedup example in cmd/fsnotify).
+ // you may get hundreds of Write events, and you may
+ // want to wait until you've stopped receiving them
+ // (see the dedup example in cmd/fsnotify).
+ //
+	//	// Some systems may send a Write event for directories
+	//	// when the directory content changes.
//
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent
- // and on kqueue when a file is truncated. On Windows
- // it's never sent.
+ // when a file is truncated. On Windows it's never
+ // sent.
Events chan Event
// Errors sends any errors.
+ //
+ // ErrEventOverflow is used to indicate there are too many events:
+ //
+ // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
+ // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
+ // - kqueue, fen: Not used.
Errors chan error
done chan struct{}
@@ -133,6 +150,18 @@ type pathInfo struct {
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
+ return NewBufferedWatcher(0)
+}
+
+// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
+// channel.
+//
+// The main use case for this is situations with a very large number of events
+// where the kernel buffer size can't be increased (e.g. due to lack of
+// permissions). An unbuffered Watcher will perform better for almost all use
+// cases, and whenever possible you will be better off increasing the kernel
+// buffers instead of adding a large userspace buffer.
+func NewBufferedWatcher(sz uint) (*Watcher, error) {
kq, closepipe, err := newKqueue()
if err != nil {
return nil, err
@@ -147,7 +176,7 @@ func NewWatcher() (*Watcher, error) {
paths: make(map[int]pathInfo),
fileExists: make(map[string]struct{}),
userWatches: make(map[string]struct{}),
- Events: make(chan Event),
+ Events: make(chan Event, sz),
Errors: make(chan error),
done: make(chan struct{}),
}
@@ -197,8 +226,8 @@ func (w *Watcher) sendEvent(e Event) bool {
case w.Events <- e:
return true
case <-w.done:
+ return false
}
- return false
}
// Returns true if the error was sent, or false if watcher is closed.
@@ -207,11 +236,11 @@ func (w *Watcher) sendError(err error) bool {
case w.Errors <- err:
return true
case <-w.done:
+ return false
}
- return false
}
-// Close removes all watches and closes the events channel.
+// Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error {
w.mu.Lock()
if w.isClosed {
@@ -239,17 +268,21 @@ func (w *Watcher) Close() error {
// Add starts monitoring the path for changes.
//
-// A path can only be watched once; attempting to watch it more than once will
-// return an error. Paths that do not yet exist on the filesystem cannot be
-// added. A watch will be automatically removed if the path is deleted.
+// A path can only be watched once; watching it more than once is a no-op and will
+// not return an error. Paths that do not yet exist on the filesystem cannot be
+// watched.
//
-// A path will remain watched if it gets renamed to somewhere else on the same
-// filesystem, but the monitor will get removed if the path gets deleted and
-// re-created, or if it's moved to a different filesystem.
+// A watch will be automatically removed if the watched path is deleted or
+// renamed. The exception is the Windows backend, which doesn't remove the
+// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
+// Returns [ErrClosed] if [Watcher.Close] was called.
+//
+// See [Watcher.AddWith] for a version that allows adding options.
+//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
@@ -259,15 +292,28 @@ func (w *Watcher) Close() error {
// # Watching files
//
// Watching individual files (rather than directories) is generally not
-// recommended as many tools update files atomically. Instead of "just" writing
-// to the file a temporary file will be written to first, and if successful the
-// temporary file is moved to to destination removing the original, or some
-// variant thereof. The watcher on the original file is now lost, as it no
-// longer exists.
-//
-// Instead, watch the parent directory and use Event.Name to filter out files
-// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
-func (w *Watcher) Add(name string) error {
+// recommended as many programs (especially editors) update files atomically: it
+// will write to a temporary file which is then moved to the destination,
+// overwriting the original (or some variant thereof). The watcher on the
+// original file is now lost, as that no longer exists.
+//
+// The upshot of this is that a power failure or crash won't leave a
+// half-written file.
+//
+// Watch the parent directory and use Event.Name to filter out files you're not
+// interested in. There is an example of this in cmd/fsnotify/file.go.
+func (w *Watcher) Add(name string) error { return w.AddWith(name) }
+
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+// other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(name string, opts ...addOpt) error {
+ _ = getOptions(opts...)
+
w.mu.Lock()
w.userWatches[name] = struct{}{}
w.mu.Unlock()
@@ -281,9 +327,19 @@ func (w *Watcher) Add(name string) error {
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+//
+// Returns nil if [Watcher.Close] was called.
func (w *Watcher) Remove(name string) error {
+ return w.remove(name, true)
+}
+
+func (w *Watcher) remove(name string, unwatchFiles bool) error {
name = filepath.Clean(name)
w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return nil
+ }
watchfd, ok := w.watches[name]
w.mu.Unlock()
if !ok {
@@ -315,7 +371,7 @@ func (w *Watcher) Remove(name string) error {
w.mu.Unlock()
// Find all watched paths that are in this directory that are not external.
- if isDir {
+ if unwatchFiles && isDir {
var pathsToRemove []string
w.mu.Lock()
for fd := range w.watchesByDir[name] {
@@ -326,20 +382,25 @@ func (w *Watcher) Remove(name string) error {
}
w.mu.Unlock()
for _, name := range pathsToRemove {
- // Since these are internal, not much sense in propagating error
- // to the user, as that will just confuse them with an error about
- // a path they did not explicitly watch themselves.
+ // Since these are internal, not much sense in propagating error to
+ // the user, as that will just confuse them with an error about a
+ // path they did not explicitly watch themselves.
w.Remove(name)
}
}
-
return nil
}
-// WatchList returns all paths added with [Add] (and are not yet removed).
+// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
+// yet removed).
+//
+// Returns nil if [Watcher.Close] was called.
func (w *Watcher) WatchList() []string {
w.mu.Lock()
defer w.mu.Unlock()
+ if w.isClosed {
+ return nil
+ }
entries := make([]string, 0, len(w.userWatches))
for pathname := range w.userWatches {
@@ -352,18 +413,18 @@ func (w *Watcher) WatchList() []string {
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
-// addWatch adds name to the watched file set.
-// The flags are interpreted as described in kevent(2).
-// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
+// addWatch adds name to the watched file set; the flags are interpreted as
+// described in kevent(2).
+//
+// Returns the real path to the file which was added, with symlinks resolved.
func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
var isDir bool
- // Make ./name and name equivalent
name = filepath.Clean(name)
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
- return "", errors.New("kevent instance already closed")
+ return "", ErrClosed
}
watchfd, alreadyWatching := w.watches[name]
// We already have a watch, but we can still override flags.
@@ -383,27 +444,30 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
return "", nil
}
- // Follow Symlinks
- //
- // Linux can add unresolvable symlinks to the watch list without issue,
- // and Windows can't do symlinks period. To maintain consistency, we
- // will act like everything is fine if the link can't be resolved.
- // There will simply be no file events for broken symlinks. Hence the
- // returns of nil on errors.
+ // Follow Symlinks.
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
- name, err = filepath.EvalSymlinks(name)
+ link, err := os.Readlink(name)
if err != nil {
+ // Return nil because Linux can add unresolvable symlinks to the
+ // watch list without problems, so maintain consistency with
+ // that. There will be no file events for broken symlinks.
+ // TODO: more specific check; returns os.PathError; ENOENT?
return "", nil
}
w.mu.Lock()
- _, alreadyWatching = w.watches[name]
+ _, alreadyWatching = w.watches[link]
w.mu.Unlock()
if alreadyWatching {
- return name, nil
+ // Add to watches so we don't get spurious Create events later
+ // on when we diff the directories.
+ w.watches[name] = 0
+ w.fileExists[name] = struct{}{}
+ return link, nil
}
+ name = link
fi, err = os.Lstat(name)
if err != nil {
return "", nil
@@ -411,7 +475,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
}
// Retry on EINTR; open() can return EINTR in practice on macOS.
- // See #354, and go issues 11180 and 39237.
+ // See #354, and Go issues 11180 and 39237.
for {
watchfd, err = unix.Open(name, openMode, 0)
if err == nil {
@@ -444,14 +508,13 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
w.watchesByDir[parentName] = watchesByDir
}
watchesByDir[watchfd] = struct{}{}
-
w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
w.mu.Unlock()
}
if isDir {
- // Watch the directory if it has not been watched before,
- // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
+ // Watch the directory if it has not been watched before, or if it was
+ // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
w.mu.Lock()
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
@@ -473,13 +536,10 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
// Event values that it sends down the Events channel.
func (w *Watcher) readEvents() {
defer func() {
- err := unix.Close(w.kq)
- if err != nil {
- w.Errors <- err
- }
- unix.Close(w.closepipe[0])
close(w.Events)
close(w.Errors)
+ _ = unix.Close(w.kq)
+ unix.Close(w.closepipe[0])
}()
eventBuffer := make([]unix.Kevent_t, 10)
@@ -513,18 +573,8 @@ func (w *Watcher) readEvents() {
event := w.newEvent(path.name, mask)
- if path.isDir && !event.Has(Remove) {
- // Double check to make sure the directory exists. This can
- // happen when we do a rm -fr on a recursively watched folders
- // and we receive a modification event first but the folder has
- // been deleted and later receive the delete event.
- if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
- event.Op |= Remove
- }
- }
-
if event.Has(Rename) || event.Has(Remove) {
- w.Remove(event.Name)
+ w.remove(event.Name, false)
w.mu.Lock()
delete(w.fileExists, event.Name)
w.mu.Unlock()
@@ -540,26 +590,30 @@ func (w *Watcher) readEvents() {
}
if event.Has(Remove) {
- // Look for a file that may have overwritten this.
- // For example, mv f1 f2 will delete f2, then create f2.
+ // Look for a file that may have overwritten this; for example,
+ // mv f1 f2 will delete f2, then create f2.
if path.isDir {
fileDir := filepath.Clean(event.Name)
w.mu.Lock()
_, found := w.watches[fileDir]
w.mu.Unlock()
if found {
- // make sure the directory exists before we watch for changes. When we
- // do a recursive watch and perform rm -fr, the parent directory might
- // have gone missing, ignore the missing directory and let the
- // upcoming delete event remove the watch from the parent directory.
- if _, err := os.Lstat(fileDir); err == nil {
- w.sendDirectoryChangeEvents(fileDir)
+ err := w.sendDirectoryChangeEvents(fileDir)
+ if err != nil {
+ if !w.sendError(err) {
+ closed = true
+ }
}
}
} else {
filePath := filepath.Clean(event.Name)
- if fileInfo, err := os.Lstat(filePath); err == nil {
- w.sendFileCreatedEventIfNew(filePath, fileInfo)
+ if fi, err := os.Lstat(filePath); err == nil {
+ err := w.sendFileCreatedEventIfNew(filePath, fi)
+ if err != nil {
+ if !w.sendError(err) {
+ closed = true
+ }
+ }
}
}
}
@@ -582,21 +636,31 @@ func (w *Watcher) newEvent(name string, mask uint32) Event {
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
e.Op |= Chmod
}
+ // No point sending a write and delete event at the same time: if it's gone,
+ // then it's gone.
+ if e.Op.Has(Write) && e.Op.Has(Remove) {
+ e.Op &^= Write
+ }
return e
}
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
// Get all files
- files, err := ioutil.ReadDir(dirPath)
+ files, err := os.ReadDir(dirPath)
if err != nil {
return err
}
- for _, fileInfo := range files {
- path := filepath.Join(dirPath, fileInfo.Name())
+ for _, f := range files {
+ path := filepath.Join(dirPath, f.Name())
+
+ fi, err := f.Info()
+ if err != nil {
+ return fmt.Errorf("%q: %w", path, err)
+ }
- cleanPath, err := w.internalWatch(path, fileInfo)
+ cleanPath, err := w.internalWatch(path, fi)
if err != nil {
// No permission to read the file; that's not a problem: just skip.
// But do add it to w.fileExists to prevent it from being picked up
@@ -606,7 +670,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error {
case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM):
cleanPath = filepath.Clean(path)
default:
- return fmt.Errorf("%q: %w", filepath.Join(dirPath, fileInfo.Name()), err)
+ return fmt.Errorf("%q: %w", path, err)
}
}
@@ -622,26 +686,37 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error {
//
// This functionality is to have the BSD watcher match the inotify, which sends
// a create event for files created in a watched directory.
-func (w *Watcher) sendDirectoryChangeEvents(dir string) {
- // Get all files
- files, err := ioutil.ReadDir(dir)
+func (w *Watcher) sendDirectoryChangeEvents(dir string) error {
+ files, err := os.ReadDir(dir)
if err != nil {
- if !w.sendError(fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)) {
- return
+ // Directory no longer exists: we can ignore this safely. kqueue will
+ // still give us the correct events.
+ if errors.Is(err, os.ErrNotExist) {
+ return nil
}
+ return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
}
- // Search for new files
- for _, fi := range files {
- err := w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi)
+ for _, f := range files {
+ fi, err := f.Info()
if err != nil {
- return
+ return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
+ }
+
+ err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi)
+ if err != nil {
+ // Don't need to send an error if this file isn't readable.
+ if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) {
+ return nil
+ }
+ return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
}
}
+ return nil
}
// sendFileCreatedEvent sends a create event if the file isn't already being tracked.
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) {
w.mu.Lock()
_, doesExist := w.fileExists[filePath]
w.mu.Unlock()
@@ -652,7 +727,7 @@ func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInf
}
// like watchDirectoryFiles (but without doing another ReadDir)
- filePath, err = w.internalWatch(filePath, fileInfo)
+ filePath, err = w.internalWatch(filePath, fi)
if err != nil {
return err
}
@@ -664,10 +739,10 @@ func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInf
return nil
}
-func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
- if fileInfo.IsDir() {
- // mimic Linux providing delete events for subdirectories
- // but preserve the flags used if currently watching subdirectory
+func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) {
+ if fi.IsDir() {
+ // mimic Linux providing delete events for subdirectories, but preserve
+ // the flags used if currently watching subdirectory
w.mu.Lock()
flags := w.dirFlags[name]
w.mu.Unlock()
diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/backend_other.go b/test/tools/vendor/github.com/fsnotify/fsnotify/backend_other.go
index a9bb1c3c4d0..d34a23c015f 100644
--- a/test/tools/vendor/github.com/fsnotify/fsnotify/backend_other.go
+++ b/test/tools/vendor/github.com/fsnotify/fsnotify/backend_other.go
@@ -1,39 +1,169 @@
-//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows
-// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
+//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows)
+// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
+
+// Note: the documentation on the Watcher type and methods is generated from
+// mkdoc.zsh
package fsnotify
-import (
- "fmt"
- "runtime"
-)
+import "errors"
-// Watcher watches a set of files, delivering events to a channel.
-type Watcher struct{}
+// Watcher watches a set of paths, delivering events on a channel.
+//
+// A watcher should not be copied (e.g. pass it by pointer, rather than by
+// value).
+//
+// # Linux notes
+//
+// When a file is removed a Remove event won't be emitted until all file
+// descriptors are closed, and deletes will always emit a Chmod. For example:
+//
+// fp := os.Open("file")
+// os.Remove("file") // Triggers Chmod
+// fp.Close() // Triggers Remove
+//
+// This is the event that inotify sends, so not much can be changed about this.
+//
+// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
+// for the number of watches per user, and fs.inotify.max_user_instances
+// specifies the maximum number of inotify instances per user. Every Watcher you
+// create is an "instance", and every path you add is a "watch".
+//
+// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
+// /proc/sys/fs/inotify/max_user_instances
+//
+// To increase them you can use sysctl or write the value to the /proc file:
+//
+// # Default values on Linux 5.18
+// sysctl fs.inotify.max_user_watches=124983
+// sysctl fs.inotify.max_user_instances=128
+//
+// To make the changes persist on reboot edit /etc/sysctl.conf or
+// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
+// your distro's documentation):
+//
+// fs.inotify.max_user_watches=124983
+// fs.inotify.max_user_instances=128
+//
+// Reaching the limit will result in a "no space left on device" or "too many open
+// files" error.
+//
+// # kqueue notes (macOS, BSD)
+//
+// kqueue requires opening a file descriptor for every file that's being watched;
+// so if you're watching a directory with five files then that's six file
+// descriptors. You will run in to your system's "max open files" limit faster on
+// these platforms.
+//
+// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
+// control the maximum number of open files, as well as /etc/login.conf on BSD
+// systems.
+//
+// # Windows notes
+//
+// Paths can be added as "C:\path\to\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
+//
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all times, sometimes it will send no
+// events, and often only for some files.
+//
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems. If you have many
+// events in quick succession this may not be enough, and you will have to use
+// [WithBufferSize] to increase the value.
+type Watcher struct {
+ // Events sends the filesystem change events.
+ //
+ // fsnotify can send the following events; a "path" here can refer to a
+ // file, directory, symbolic link, or special file like a FIFO.
+ //
+ // fsnotify.Create A new path was created; this may be followed by one
+ // or more Write events if data also gets written to a
+ // file.
+ //
+ // fsnotify.Remove A path was removed.
+ //
+ // fsnotify.Rename A path was renamed. A rename is always sent with the
+ // old path as Event.Name, and a Create event will be
+ // sent with the new name. Renames are only sent for
+ // paths that are currently watched; e.g. moving an
+ // unmonitored file into a monitored directory will
+ // show up as just a Create. Similarly, renaming a file
+ // to outside a monitored directory will show up as
+ // only a Rename.
+ //
+ // fsnotify.Write A file or named pipe was written to. A Truncate will
+ // also trigger a Write. A single "write action"
+ // initiated by the user may show up as one or multiple
+ // writes, depending on when the system syncs things to
+ // disk. For example when compiling a large Go program
+ // you may get hundreds of Write events, and you may
+ // want to wait until you've stopped receiving them
+ // (see the dedup example in cmd/fsnotify).
+ //
+ // Some systems may send Write event for directories
+ // when the directory content changes.
+ //
+ // fsnotify.Chmod Attributes were changed. On Linux this is also sent
+ // when a file is removed (or more accurately, when a
+ // link to an inode is removed). On kqueue it's sent
+ // when a file is truncated. On Windows it's never
+ // sent.
+ Events chan Event
+
+ // Errors sends any errors.
+ //
+ // ErrEventOverflow is used to indicate there are too many events:
+ //
+ // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
+ // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
+ // - kqueue, fen: Not used.
+ Errors chan error
+}
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
- return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS)
+ return nil, errors.New("fsnotify not supported on the current platform")
}
-// Close removes all watches and closes the events channel.
-func (w *Watcher) Close() error {
- return nil
-}
+// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
+// channel.
+//
+// The main use case for this is situations with a very large number of events
+// where the kernel buffer size can't be increased (e.g. due to lack of
+// permissions). An unbuffered Watcher will perform better for almost all use
+// cases, and whenever possible you will be better off increasing the kernel
+// buffers instead of adding a large userspace buffer.
+func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() }
+
+// Close removes all watches and closes the Events channel.
+func (w *Watcher) Close() error { return nil }
+
+// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
+// yet removed).
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) WatchList() []string { return nil }
// Add starts monitoring the path for changes.
//
-// A path can only be watched once; attempting to watch it more than once will
-// return an error. Paths that do not yet exist on the filesystem cannot be
-// added. A watch will be automatically removed if the path is deleted.
+// A path can only be watched once; watching it more than once is a no-op and will
+// not return an error. Paths that do not yet exist on the filesystem cannot be
+// watched.
//
-// A path will remain watched if it gets renamed to somewhere else on the same
-// filesystem, but the monitor will get removed if the path gets deleted and
-// re-created, or if it's moved to a different filesystem.
+// A watch will be automatically removed if the watched path is deleted or
+// renamed. The exception is the Windows backend, which doesn't remove the
+// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
+// Returns [ErrClosed] if [Watcher.Close] was called.
+//
+// See [Watcher.AddWith] for a version that allows adding options.
+//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
@@ -43,17 +173,26 @@ func (w *Watcher) Close() error {
// # Watching files
//
// Watching individual files (rather than directories) is generally not
-// recommended as many tools update files atomically. Instead of "just" writing
-// to the file a temporary file will be written to first, and if successful the
-// temporary file is moved to to destination removing the original, or some
-// variant thereof. The watcher on the original file is now lost, as it no
-// longer exists.
-//
-// Instead, watch the parent directory and use Event.Name to filter out files
-// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
-func (w *Watcher) Add(name string) error {
- return nil
-}
+// recommended as many programs (especially editors) update files atomically: it
+// will write to a temporary file which is then moved to to destination,
+// overwriting the original (or some variant thereof). The watcher on the
+// original file is now lost, as that no longer exists.
+//
+// The upshot of this is that a power failure or crash won't leave a
+// half-written file.
+//
+// Watch the parent directory and use Event.Name to filter out files you're not
+// interested in. There is an example of this in cmd/fsnotify/file.go.
+func (w *Watcher) Add(name string) error { return nil }
+
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+// other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil }
// Remove stops monitoring the path for changes.
//
@@ -61,6 +200,6 @@ func (w *Watcher) Add(name string) error {
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
-func (w *Watcher) Remove(name string) error {
- return nil
-}
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) Remove(name string) error { return nil }
diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/test/tools/vendor/github.com/fsnotify/fsnotify/backend_windows.go
index ae392867c04..9bc91e5d613 100644
--- a/test/tools/vendor/github.com/fsnotify/fsnotify/backend_windows.go
+++ b/test/tools/vendor/github.com/fsnotify/fsnotify/backend_windows.go
@@ -1,6 +1,13 @@
//go:build windows
// +build windows
+// Windows backend based on ReadDirectoryChangesW()
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
+//
+// Note: the documentation on the Watcher type and methods is generated from
+// mkdoc.zsh
+
package fsnotify
import (
@@ -27,9 +34,9 @@ import (
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
-// fp := os.Open("file")
-// os.Remove("file") // Triggers Chmod
-// fp.Close() // Triggers Remove
+// fp := os.Open("file")
+// os.Remove("file") // Triggers Chmod
+// fp.Close() // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
@@ -43,16 +50,16 @@ import (
//
// To increase them you can use sysctl or write the value to the /proc file:
//
-// # Default values on Linux 5.18
-// sysctl fs.inotify.max_user_watches=124983
-// sysctl fs.inotify.max_user_instances=128
+// # Default values on Linux 5.18
+// sysctl fs.inotify.max_user_watches=124983
+// sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
-// fs.inotify.max_user_watches=124983
-// fs.inotify.max_user_instances=128
+// fs.inotify.max_user_watches=124983
+// fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
@@ -68,14 +75,20 @@ import (
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
-// # macOS notes
+// # Windows notes
//
-// Spotlight indexing on macOS can result in multiple events (see [#15]). A
-// temporary workaround is to add your folder(s) to the "Spotlight Privacy
-// Settings" until we have a native FSEvents implementation (see [#11]).
+// Paths can be added as "C:\path\to\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
//
-// [#11]: https://github.com/fsnotify/fsnotify/issues/11
-// [#15]: https://github.com/fsnotify/fsnotify/issues/15
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all times, sometimes it will send no
+// events, and often only for some files.
+//
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems. If you have many
+// events in quick succession this may not be enough, and you will have to use
+// [WithBufferSize] to increase the value.
type Watcher struct {
// Events sends the filesystem change events.
//
@@ -102,31 +115,52 @@ type Watcher struct {
// initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program
- // you may get hundreds of Write events, so you
- // probably want to wait until you've stopped receiving
- // them (see the dedup example in cmd/fsnotify).
+ // you may get hundreds of Write events, and you may
+ // want to wait until you've stopped receiving them
+ // (see the dedup example in cmd/fsnotify).
+ //
+ // Some systems may send Write event for directories
+ // when the directory content changes.
//
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent
- // and on kqueue when a file is truncated. On Windows
- // it's never sent.
+ // when a file is truncated. On Windows it's never
+ // sent.
Events chan Event
// Errors sends any errors.
+ //
+ // ErrEventOverflow is used to indicate there are too many events:
+ //
+ // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
+ // - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
+ // - kqueue, fen: Not used.
Errors chan error
port windows.Handle // Handle to completion port
input chan *input // Inputs to the reader are sent on this channel
quit chan chan<- error
- mu sync.Mutex // Protects access to watches, isClosed
- watches watchMap // Map of watches (key: i-number)
- isClosed bool // Set to true when Close() is first called
+ mu sync.Mutex // Protects access to watches, closed
+ watches watchMap // Map of watches (key: i-number)
+ closed bool // Set to true when Close() is first called
}
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
+ return NewBufferedWatcher(50)
+}
+
+// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
+// channel.
+//
+// The main use case for this is situations with a very large number of events
+// where the kernel buffer size can't be increased (e.g. due to lack of
+// permissions). An unbuffered Watcher will perform better for almost all use
+// cases, and whenever possible you will be better off increasing the kernel
+// buffers instead of adding a large userspace buffer.
+func NewBufferedWatcher(sz uint) (*Watcher, error) {
port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
if err != nil {
return nil, os.NewSyscallError("CreateIoCompletionPort", err)
@@ -135,7 +169,7 @@ func NewWatcher() (*Watcher, error) {
port: port,
watches: make(watchMap),
input: make(chan *input, 1),
- Events: make(chan Event, 50),
+ Events: make(chan Event, sz),
Errors: make(chan error),
quit: make(chan chan<- error, 1),
}
@@ -143,6 +177,12 @@ func NewWatcher() (*Watcher, error) {
return w, nil
}
+func (w *Watcher) isClosed() bool {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ return w.closed
+}
+
func (w *Watcher) sendEvent(name string, mask uint64) bool {
if mask == 0 {
return false
@@ -167,14 +207,14 @@ func (w *Watcher) sendError(err error) bool {
return false
}
-// Close removes all watches and closes the events channel.
+// Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error {
- w.mu.Lock()
- if w.isClosed {
- w.mu.Unlock()
+ if w.isClosed() {
return nil
}
- w.isClosed = true
+
+ w.mu.Lock()
+ w.closed = true
w.mu.Unlock()
// Send "quit" message to the reader goroutine
@@ -188,17 +228,21 @@ func (w *Watcher) Close() error {
// Add starts monitoring the path for changes.
//
-// A path can only be watched once; attempting to watch it more than once will
-// return an error. Paths that do not yet exist on the filesystem cannot be
-// added. A watch will be automatically removed if the path is deleted.
+// A path can only be watched once; watching it more than once is a no-op and will
+// not return an error. Paths that do not yet exist on the filesystem cannot be
+// watched.
//
-// A path will remain watched if it gets renamed to somewhere else on the same
-// filesystem, but the monitor will get removed if the path gets deleted and
-// re-created, or if it's moved to a different filesystem.
+// A watch will be automatically removed if the watched path is deleted or
+// renamed. The exception is the Windows backend, which doesn't remove the
+// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
+// Returns [ErrClosed] if [Watcher.Close] was called.
+//
+// See [Watcher.AddWith] for a version that allows adding options.
+//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
@@ -208,27 +252,41 @@ func (w *Watcher) Close() error {
// # Watching files
//
// Watching individual files (rather than directories) is generally not
-// recommended as many tools update files atomically. Instead of "just" writing
-// to the file a temporary file will be written to first, and if successful the
-// temporary file is moved to to destination removing the original, or some
-// variant thereof. The watcher on the original file is now lost, as it no
-// longer exists.
-//
-// Instead, watch the parent directory and use Event.Name to filter out files
-// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
-func (w *Watcher) Add(name string) error {
- w.mu.Lock()
- if w.isClosed {
- w.mu.Unlock()
- return errors.New("watcher already closed")
+// recommended as many programs (especially editors) update files atomically: it
+// will write to a temporary file which is then moved to to destination,
+// overwriting the original (or some variant thereof). The watcher on the
+// original file is now lost, as that no longer exists.
+//
+// The upshot of this is that a power failure or crash won't leave a
+// half-written file.
+//
+// Watch the parent directory and use Event.Name to filter out files you're not
+// interested in. There is an example of this in cmd/fsnotify/file.go.
+func (w *Watcher) Add(name string) error { return w.AddWith(name) }
+
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+// other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(name string, opts ...addOpt) error {
+ if w.isClosed() {
+ return ErrClosed
+ }
+
+ with := getOptions(opts...)
+ if with.bufsize < 4096 {
+ return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes")
}
- w.mu.Unlock()
in := &input{
- op: opAddWatch,
- path: filepath.Clean(name),
- flags: sysFSALLEVENTS,
- reply: make(chan error),
+ op: opAddWatch,
+ path: filepath.Clean(name),
+ flags: sysFSALLEVENTS,
+ reply: make(chan error),
+ bufsize: with.bufsize,
}
w.input <- in
if err := w.wakeupReader(); err != nil {
@@ -243,7 +301,13 @@ func (w *Watcher) Add(name string) error {
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+//
+// Returns nil if [Watcher.Close] was called.
func (w *Watcher) Remove(name string) error {
+ if w.isClosed() {
+ return nil
+ }
+
in := &input{
op: opRemoveWatch,
path: filepath.Clean(name),
@@ -256,8 +320,15 @@ func (w *Watcher) Remove(name string) error {
return <-in.reply
}
-// WatchList returns all paths added with [Add] (and are not yet removed).
+// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
+// yet removed).
+//
+// Returns nil if [Watcher.Close] was called.
func (w *Watcher) WatchList() []string {
+ if w.isClosed() {
+ return nil
+ }
+
w.mu.Lock()
defer w.mu.Unlock()
@@ -279,7 +350,6 @@ func (w *Watcher) WatchList() []string {
// This should all be removed at some point, and just use windows.FILE_NOTIFY_*
const (
sysFSALLEVENTS = 0xfff
- sysFSATTRIB = 0x4
sysFSCREATE = 0x100
sysFSDELETE = 0x200
sysFSDELETESELF = 0x400
@@ -305,9 +375,6 @@ func (w *Watcher) newEvent(name string, mask uint32) Event {
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
e.Op |= Rename
}
- if mask&sysFSATTRIB == sysFSATTRIB {
- e.Op |= Chmod
- }
return e
}
@@ -321,10 +388,11 @@ const (
)
type input struct {
- op int
- path string
- flags uint32
- reply chan error
+ op int
+ path string
+ flags uint32
+ bufsize int
+ reply chan error
}
type inode struct {
@@ -334,13 +402,14 @@ type inode struct {
}
type watch struct {
- ov windows.Overlapped
- ino *inode // i-number
- path string // Directory path
- mask uint64 // Directory itself is being watched with these notify flags
- names map[string]uint64 // Map of names being watched and their notify flags
- rename string // Remembers the old name while renaming a file
- buf [65536]byte // 64K buffer
+ ov windows.Overlapped
+ ino *inode // i-number
+ recurse bool // Recursive watch?
+ path string // Directory path
+ mask uint64 // Directory itself is being watched with these notify flags
+ names map[string]uint64 // Map of names being watched and their notify flags
+ rename string // Remembers the old name while renaming a file
+ buf []byte // buffer, allocated later
}
type (
@@ -413,7 +482,10 @@ func (m watchMap) set(ino *inode, watch *watch) {
}
// Must run within the I/O thread.
-func (w *Watcher) addWatch(pathname string, flags uint64) error {
+func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error {
+ //pathname, recurse := recursivePath(pathname)
+ recurse := false
+
dir, err := w.getDir(pathname)
if err != nil {
return err
@@ -433,9 +505,11 @@ func (w *Watcher) addWatch(pathname string, flags uint64) error {
return os.NewSyscallError("CreateIoCompletionPort", err)
}
watchEntry = &watch{
- ino: ino,
- path: dir,
- names: make(map[string]uint64),
+ ino: ino,
+ path: dir,
+ names: make(map[string]uint64),
+ recurse: recurse,
+ buf: make([]byte, bufsize),
}
w.mu.Lock()
w.watches.set(ino, watchEntry)
@@ -465,6 +539,8 @@ func (w *Watcher) addWatch(pathname string, flags uint64) error {
// Must run within the I/O thread.
func (w *Watcher) remWatch(pathname string) error {
+ pathname, recurse := recursivePath(pathname)
+
dir, err := w.getDir(pathname)
if err != nil {
return err
@@ -478,6 +554,10 @@ func (w *Watcher) remWatch(pathname string) error {
watch := w.watches.get(ino)
w.mu.Unlock()
+ if recurse && !watch.recurse {
+ return fmt.Errorf("can't use \\... with non-recursive watch %q", pathname)
+ }
+
err = windows.CloseHandle(ino.handle)
if err != nil {
w.sendError(os.NewSyscallError("CloseHandle", err))
@@ -535,8 +615,11 @@ func (w *Watcher) startRead(watch *watch) error {
return nil
}
- rdErr := windows.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
- uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
+ // We need to pass the array, rather than the slice.
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&watch.buf))
+ rdErr := windows.ReadDirectoryChanges(watch.ino.handle,
+ (*byte)(unsafe.Pointer(hdr.Data)), uint32(hdr.Len),
+ watch.recurse, mask, nil, &watch.ov, 0)
if rdErr != nil {
err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
@@ -563,9 +646,8 @@ func (w *Watcher) readEvents() {
runtime.LockOSThread()
for {
+ // This error is handled after the watch == nil check below.
qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE)
- // This error is handled after the watch == nil check below. NOTE: this
- // seems odd, note sure if it's correct.
watch := (*watch)(unsafe.Pointer(ov))
if watch == nil {
@@ -595,7 +677,7 @@ func (w *Watcher) readEvents() {
case in := <-w.input:
switch in.op {
case opAddWatch:
- in.reply <- w.addWatch(in.path, uint64(in.flags))
+ in.reply <- w.addWatch(in.path, uint64(in.flags), in.bufsize)
case opRemoveWatch:
in.reply <- w.remWatch(in.path)
}
@@ -605,6 +687,8 @@ func (w *Watcher) readEvents() {
}
switch qErr {
+ case nil:
+ // No error
case windows.ERROR_MORE_DATA:
if watch == nil {
w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer"))
@@ -626,13 +710,12 @@ func (w *Watcher) readEvents() {
default:
w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr))
continue
- case nil:
}
var offset uint32
for {
if n == 0 {
- w.sendError(errors.New("short read in readEvents()"))
+ w.sendError(ErrEventOverflow)
break
}
@@ -703,8 +786,9 @@ func (w *Watcher) readEvents() {
// Error!
if offset >= n {
+ //lint:ignore ST1005 Windows should be capitalized
w.sendError(errors.New(
- "Windows system assumed buffer larger than it is, events have likely been missed."))
+ "Windows system assumed buffer larger than it is, events have likely been missed"))
break
}
}
@@ -720,9 +804,6 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 {
if mask&sysFSMODIFY != 0 {
m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE
}
- if mask&sysFSATTRIB != 0 {
- m |= windows.FILE_NOTIFY_CHANGE_ATTRIBUTES
- }
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME
}
diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/test/tools/vendor/github.com/fsnotify/fsnotify/fsnotify.go
index 30a5bf0f07a..24c99cc4999 100644
--- a/test/tools/vendor/github.com/fsnotify/fsnotify/fsnotify.go
+++ b/test/tools/vendor/github.com/fsnotify/fsnotify/fsnotify.go
@@ -1,13 +1,18 @@
-//go:build !plan9
-// +build !plan9
-
// Package fsnotify provides a cross-platform interface for file system
// notifications.
+//
+// Currently supported systems:
+//
+// Linux 2.6.32+ via inotify
+// BSD, macOS via kqueue
+// Windows via ReadDirectoryChangesW
+// illumos via FEN
package fsnotify
import (
"errors"
"fmt"
+ "path/filepath"
"strings"
)
@@ -33,34 +38,52 @@ type Op uint32
// The operations fsnotify can trigger; see the documentation on [Watcher] for a
// full description, and check them with [Event.Has].
const (
+ // A new pathname was created.
Create Op = 1 << iota
+
+ // The pathname was written to; this does *not* mean the write has finished,
+ // and a write can be followed by more writes.
Write
+
+ // The path was removed; any watches on it will be removed. Some "remove"
+ // operations may trigger a Rename if the file is actually moved (for
+ // example "remove to trash" is often a rename).
Remove
+
+ // The path was renamed to something else; any watched on it will be
+ // removed.
Rename
+
+ // File attributes were changed.
+ //
+ // It's generally not recommended to take action on this event, as it may
+ // get triggered very frequently by some software. For example, Spotlight
+ // indexing on macOS, anti-virus software, backup software, etc.
Chmod
)
-// Common errors that can be reported by a watcher
+// Common errors that can be reported.
var (
- ErrNonExistentWatch = errors.New("can't remove non-existent watcher")
- ErrEventOverflow = errors.New("fsnotify queue overflow")
+ ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch")
+ ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow")
+ ErrClosed = errors.New("fsnotify: watcher already closed")
)
-func (op Op) String() string {
+func (o Op) String() string {
var b strings.Builder
- if op.Has(Create) {
+ if o.Has(Create) {
b.WriteString("|CREATE")
}
- if op.Has(Remove) {
+ if o.Has(Remove) {
b.WriteString("|REMOVE")
}
- if op.Has(Write) {
+ if o.Has(Write) {
b.WriteString("|WRITE")
}
- if op.Has(Rename) {
+ if o.Has(Rename) {
b.WriteString("|RENAME")
}
- if op.Has(Chmod) {
+ if o.Has(Chmod) {
b.WriteString("|CHMOD")
}
if b.Len() == 0 {
@@ -70,7 +93,7 @@ func (op Op) String() string {
}
// Has reports if this operation has the given operation.
-func (o Op) Has(h Op) bool { return o&h == h }
+func (o Op) Has(h Op) bool { return o&h != 0 }
// Has reports if this event has the given operation.
func (e Event) Has(op Op) bool { return e.Op.Has(op) }
@@ -79,3 +102,45 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) }
func (e Event) String() string {
return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
}
+
+type (
+ addOpt func(opt *withOpts)
+ withOpts struct {
+ bufsize int
+ }
+)
+
+var defaultOpts = withOpts{
+ bufsize: 65536, // 64K
+}
+
+func getOptions(opts ...addOpt) withOpts {
+ with := defaultOpts
+ for _, o := range opts {
+ o(&with)
+ }
+ return with
+}
+
+// WithBufferSize sets the [ReadDirectoryChangesW] buffer size.
+//
+// This only has effect on Windows systems, and is a no-op for other backends.
+//
+// The default value is 64K (65536 bytes) which is the highest value that works
+// on all filesystems and should be enough for most applications, but if you
+// have a large burst of events it may not be enough. You can increase it if
+// you're hitting "queue or buffer overflow" errors ([ErrEventOverflow]).
+//
+// [ReadDirectoryChangesW]: https://learn.microsoft.com/en-gb/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
+func WithBufferSize(bytes int) addOpt {
+ return func(opt *withOpts) { opt.bufsize = bytes }
+}
+
+// Check if this path is recursive (ends with "/..." or "\..."), and return the
+// path with the /... stripped.
+func recursivePath(path string) (string, bool) {
+ if filepath.Base(path) == "..." {
+ return filepath.Dir(path), true
+ }
+ return path, false
+}
diff --git a/test/tools/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/test/tools/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh
index b09ef768340..99012ae6539 100644
--- a/test/tools/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh
+++ b/test/tools/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh
@@ -2,8 +2,8 @@
[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1
setopt err_exit no_unset pipefail extended_glob
-# Simple script to update the godoc comments on all watchers. Probably took me
-# more time to write this than doing it manually, but ah well 🙃
+# Simple script to update the godoc comments on all watchers so you don't need
+# to update the same comment 5 times.
watcher=$(< This package currently only supports OpenAPI 2.0 (aka Swagger 2.0).
> There is no plan to make it evolve toward supporting OpenAPI 3.x.
> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story.
->
diff --git a/test/tools/vendor/github.com/go-openapi/analysis/appveyor.yml b/test/tools/vendor/github.com/go-openapi/analysis/appveyor.yml
deleted file mode 100644
index c2f6fd733a9..00000000000
--- a/test/tools/vendor/github.com/go-openapi/analysis/appveyor.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-version: "0.1.{build}"
-
-clone_folder: C:\go-openapi\analysis
-shallow_clone: true # for startup speed
-pull_requests:
- do_not_increment_build_number: true
-
-#skip_tags: true
-#skip_branch_with_pr: true
-
-# appveyor.yml
-build: off
-
-environment:
- GOPATH: c:\gopath
-
-stack: go 1.16
-
-test_script:
- - go test -v -timeout 20m ./...
-
-deploy: off
-
-notifications:
- - provider: Slack
- incoming_webhook: https://hooks.slack.com/services/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
- auth_token:
- secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4=
- channel: bots
- on_build_success: false
- on_build_failure: true
- on_build_status_changed: true
diff --git a/test/tools/vendor/github.com/go-openapi/analysis/doc.go b/test/tools/vendor/github.com/go-openapi/analysis/doc.go
index d5294c0950b..e8d9f9b1312 100644
--- a/test/tools/vendor/github.com/go-openapi/analysis/doc.go
+++ b/test/tools/vendor/github.com/go-openapi/analysis/doc.go
@@ -16,27 +16,27 @@
Package analysis provides methods to work with a Swagger specification document from
package go-openapi/spec.
-Analyzing a specification
+## Analyzing a specification
An analysed specification object (type Spec) provides methods to work with swagger definition.
-Flattening or expanding a specification
+## Flattening or expanding a specification
Flattening a specification bundles all remote $ref in the main spec document.
Depending on flattening options, additional preprocessing may take place:
- full flattening: replacing all inline complex constructs by a named entry in #/definitions
- expand: replace all $ref's in the document by their expanded content
-Merging several specifications
+## Merging several specifications
Mixin several specifications merges all Swagger constructs, and warns about found conflicts.
-Fixing a specification
+## Fixing a specification
Unmarshalling a specification with golang json unmarshalling may lead to
some unwanted result on present but empty fields.
-Analyzing a Swagger schema
+## Analyzing a Swagger schema
Swagger schemas are analyzed to determine their complexity and qualify their content.
*/
diff --git a/test/tools/vendor/github.com/go-openapi/analysis/flatten.go b/test/tools/vendor/github.com/go-openapi/analysis/flatten.go
index 0576220fb3d..ebedcc9df32 100644
--- a/test/tools/vendor/github.com/go-openapi/analysis/flatten.go
+++ b/test/tools/vendor/github.com/go-openapi/analysis/flatten.go
@@ -62,28 +62,26 @@ func newContext() *context {
//
// There is a minimal and a full flattening mode.
//
-//
// Minimally flattening a spec means:
-// - Expanding parameters, responses, path items, parameter items and header items (references to schemas are left
-// unscathed)
-// - Importing external (http, file) references so they become internal to the document
-// - Moving every JSON pointer to a $ref to a named definition (i.e. the reworked spec does not contain pointers
-// like "$ref": "#/definitions/myObject/allOfs/1")
+// - Expanding parameters, responses, path items, parameter items and header items (references to schemas are left
+// unscathed)
+// - Importing external (http, file) references so they become internal to the document
+// - Moving every JSON pointer to a $ref to a named definition (i.e. the reworked spec does not contain pointers
+// like "$ref": "#/definitions/myObject/allOfs/1")
//
// A minimally flattened spec thus guarantees the following properties:
-// - all $refs point to a local definition (i.e. '#/definitions/...')
-// - definitions are unique
+// - all $refs point to a local definition (i.e. '#/definitions/...')
+// - definitions are unique
//
// NOTE: arbitrary JSON pointers (other than $refs to top level definitions) are rewritten as definitions if they
// represent a complex schema or express commonality in the spec.
// Otherwise, they are simply expanded.
// Self-referencing JSON pointers cannot resolve to a type and trigger an error.
//
-//
// Minimal flattening is necessary and sufficient for codegen rendering using go-swagger.
//
// Fully flattening a spec means:
-// - Moving every complex inline schema to be a definition with an auto-generated name in a depth-first fashion.
+// - Moving every complex inline schema to be a definition with an auto-generated name in a depth-first fashion.
//
// By complex, we mean every JSON object with some properties.
// Arrays, when they do not define a tuple,
@@ -93,22 +91,21 @@ func newContext() *context {
// have been created.
//
// Available flattening options:
-// - Minimal: stops flattening after minimal $ref processing, leaving schema constructs untouched
-// - Expand: expand all $ref's in the document (inoperant if Minimal set to true)
-// - Verbose: croaks about name conflicts detected
-// - RemoveUnused: removes unused parameters, responses and definitions after expansion/flattening
+// - Minimal: stops flattening after minimal $ref processing, leaving schema constructs untouched
+// - Expand: expand all $ref's in the document (inoperant if Minimal set to true)
+// - Verbose: croaks about name conflicts detected
+// - RemoveUnused: removes unused parameters, responses and definitions after expansion/flattening
//
// NOTE: expansion removes all $ref save circular $ref, which remain in place
//
// TODO: additional options
-// - ProgagateNameExtensions: ensure that created entries properly follow naming rules when their parent have set a
-// x-go-name extension
-// - LiftAllOfs:
-// - limit the flattening of allOf members when simple objects
-// - merge allOf with validation only
-// - merge allOf with extensions only
-// - ...
-//
+// - ProgagateNameExtensions: ensure that created entries properly follow naming rules when their parent have set a
+// x-go-name extension
+// - LiftAllOfs:
+// - limit the flattening of allOf members when simple objects
+// - merge allOf with validation only
+// - merge allOf with extensions only
+// - ...
func Flatten(opts FlattenOpts) error {
debugLog("FlattenOpts: %#v", opts)
@@ -270,6 +267,12 @@ func nameInlinedSchemas(opts *FlattenOpts) error {
}
func removeUnused(opts *FlattenOpts) {
+ for removeUnusedSinglePass(opts) {
+ // continue until no unused definition remains
+ }
+}
+
+func removeUnusedSinglePass(opts *FlattenOpts) (hasRemoved bool) {
expected := make(map[string]struct{})
for k := range opts.Swagger().Definitions {
expected[path.Join(definitionsPath, jsonpointer.Escape(k))] = struct{}{}
@@ -280,6 +283,7 @@ func removeUnused(opts *FlattenOpts) {
}
for k := range expected {
+ hasRemoved = true
debugLog("removing unused definition %s", path.Base(k))
if opts.Verbose {
log.Printf("info: removing unused definition: %s", path.Base(k))
@@ -288,6 +292,8 @@ func removeUnused(opts *FlattenOpts) {
}
opts.Spec.reload() // re-analyze
+
+ return hasRemoved
}
func importKnownRef(entry sortref.RefRevIdx, refStr, newName string, opts *FlattenOpts) error {
@@ -334,7 +340,7 @@ func importNewRef(entry sortref.RefRevIdx, refStr string, opts *FlattenOpts) err
}
// generate a unique name - isOAIGen means that a naming conflict was resolved by changing the name
- newName, isOAIGen = uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref))
+ newName, isOAIGen = uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref, opts))
debugLog("new name for [%s]: %s - with name conflict:%t", strings.Join(entry.Keys, ", "), newName, isOAIGen)
opts.flattenContext.resolved[refStr] = newName
@@ -488,9 +494,9 @@ func stripPointersAndOAIGen(opts *FlattenOpts) error {
// stripOAIGen strips the spec from unnecessary OAIGen constructs, initially created to dedupe flattened definitions.
//
// A dedupe is deemed unnecessary whenever:
-// - the only conflict is with its (single) parent: OAIGen is merged into its parent (reinlining)
-// - there is a conflict with multiple parents: merge OAIGen in first parent, the rewrite other parents to point to
-// the first parent.
+// - the only conflict is with its (single) parent: OAIGen is merged into its parent (reinlining)
+// - there is a conflict with multiple parents: merge OAIGen in first parent, the rewrite other parents to point to
+// the first parent.
//
// This function returns true whenever it re-inlined a complex schema, so the caller may chose to iterate
// pointer and name resolution again.
@@ -652,6 +658,7 @@ func namePointers(opts *FlattenOpts) error {
refsToReplace := make(map[string]SchemaRef, len(opts.Spec.references.schemas))
for k, ref := range opts.Spec.references.allRefs {
+ debugLog("name pointers: %q => %#v", k, ref)
if path.Dir(ref.String()) == definitionsPath {
// this a ref to a top-level definition: ok
continue
@@ -769,6 +776,10 @@ func flattenAnonPointer(key string, v SchemaRef, refsToReplace map[string]Schema
// identifying edge case when the namer did nothing because we point to a non-schema object
// no definition is created and we expand the $ref for all callers
+ debugLog("decide what to do with the schema pointed to: asch.IsSimpleSchema=%t, len(callers)=%d, parts.IsSharedParam=%t, parts.IsSharedResponse=%t",
+ asch.IsSimpleSchema, len(callers), parts.IsSharedParam(), parts.IsSharedResponse(),
+ )
+
if (!asch.IsSimpleSchema || len(callers) > 1) && !parts.IsSharedParam() && !parts.IsSharedResponse() {
debugLog("replace JSON pointer at [%s] by definition: %s", key, v.Ref.String())
if err := namer.Name(v.Ref.String(), v.Schema, asch); err != nil {
@@ -791,6 +802,7 @@ func flattenAnonPointer(key string, v SchemaRef, refsToReplace map[string]Schema
return nil
}
+ // everything that is a simple schema and not factorizable is expanded
debugLog("expand JSON pointer for key=%s", key)
if err := replace.UpdateRefWithSchema(opts.Swagger(), key, v.Schema); err != nil {
diff --git a/test/tools/vendor/github.com/go-openapi/analysis/flatten_name.go b/test/tools/vendor/github.com/go-openapi/analysis/flatten_name.go
index 3ad2ccfbfd5..c7d7938ebe6 100644
--- a/test/tools/vendor/github.com/go-openapi/analysis/flatten_name.go
+++ b/test/tools/vendor/github.com/go-openapi/analysis/flatten_name.go
@@ -33,12 +33,14 @@ func (isn *InlineSchemaNamer) Name(key string, schema *spec.Schema, aschema *Ana
}
// create unique name
- newName, isOAIGen := uniqifyName(isn.Spec.Definitions, swag.ToJSONName(name))
+ mangle := mangler(isn.opts)
+ newName, isOAIGen := uniqifyName(isn.Spec.Definitions, mangle(name))
// clone schema
sch := schutils.Clone(schema)
// replace values on schema
+ debugLog("rewriting schema to ref: key=%s with new name: %s", key, newName)
if err := replace.RewriteSchemaToRef(isn.Spec, key,
spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil {
return fmt.Errorf("error while creating definition %q from inline schema: %w", newName, err)
@@ -149,13 +151,15 @@ func namesFromKey(parts sortref.SplitKey, aschema *AnalyzedSchema, operations ma
startIndex int
)
- if parts.IsOperation() {
+ switch {
+ case parts.IsOperation():
baseNames, startIndex = namesForOperation(parts, operations)
- }
-
- // definitions
- if parts.IsDefinition() {
+ case parts.IsDefinition():
baseNames, startIndex = namesForDefinition(parts)
+ default:
+ // this a non-standard pointer: build a name by concatenating its parts
+ baseNames = [][]string{parts}
+ startIndex = len(baseNames) + 1
}
result := make([]string, 0, len(baseNames))
@@ -169,6 +173,7 @@ func namesFromKey(parts sortref.SplitKey, aschema *AnalyzedSchema, operations ma
}
sort.Strings(result)
+ debugLog("names from parts: %v => %v", parts, result)
return result
}
@@ -256,10 +261,20 @@ func partAdder(aschema *AnalyzedSchema) sortref.PartAdder {
}
}
-func nameFromRef(ref spec.Ref) string {
+func mangler(o *FlattenOpts) func(string) string {
+ if o.KeepNames {
+ return func(in string) string { return in }
+ }
+
+ return swag.ToJSONName
+}
+
+func nameFromRef(ref spec.Ref, o *FlattenOpts) string {
+ mangle := mangler(o)
+
u := ref.GetURL()
if u.Fragment != "" {
- return swag.ToJSONName(path.Base(u.Fragment))
+ return mangle(path.Base(u.Fragment))
}
if u.Path != "" {
@@ -267,19 +282,19 @@ func nameFromRef(ref spec.Ref) string {
if bn != "" && bn != "/" {
ext := path.Ext(bn)
if ext != "" {
- return swag.ToJSONName(bn[:len(bn)-len(ext)])
+ return mangle(bn[:len(bn)-len(ext)])
}
- return swag.ToJSONName(bn)
+ return mangle(bn)
}
}
- return swag.ToJSONName(strings.ReplaceAll(u.Host, ".", " "))
+ return mangle(strings.ReplaceAll(u.Host, ".", " "))
}
// GenLocation indicates from which section of the specification (models or operations) a definition has been created.
//
-// This is reflected in the output spec with a "x-go-gen-location" extension. At the moment, this is is provided
+// This is reflected in the output spec with a "x-go-gen-location" extension. At the moment, this is provided
// for information only.
func GenLocation(parts sortref.SplitKey) string {
switch {
diff --git a/test/tools/vendor/github.com/go-openapi/analysis/flatten_options.go b/test/tools/vendor/github.com/go-openapi/analysis/flatten_options.go
index c5bb97b0a69..c943fe1e84a 100644
--- a/test/tools/vendor/github.com/go-openapi/analysis/flatten_options.go
+++ b/test/tools/vendor/github.com/go-openapi/analysis/flatten_options.go
@@ -26,6 +26,7 @@ type FlattenOpts struct {
Verbose bool // enable some reporting on possible name conflicts detected
RemoveUnused bool // When true, remove unused parameters, responses and definitions after expansion/flattening
ContinueOnError bool // Continue when spec expansion issues are found
+ KeepNames bool // Do not attempt to jsonify names from references when flattening
/* Extra keys */
_ struct{} // require keys
diff --git a/test/tools/vendor/github.com/go-openapi/analysis/internal/debug/debug.go b/test/tools/vendor/github.com/go-openapi/analysis/internal/debug/debug.go
index ec0fec02298..39f55a97bfd 100644
--- a/test/tools/vendor/github.com/go-openapi/analysis/internal/debug/debug.go
+++ b/test/tools/vendor/github.com/go-openapi/analysis/internal/debug/debug.go
@@ -29,7 +29,7 @@ var (
// GetLogger provides a prefix debug logger
func GetLogger(prefix string, debug bool) func(string, ...interface{}) {
if debug {
- logger := log.New(output, fmt.Sprintf("%s:", prefix), log.LstdFlags)
+ logger := log.New(output, prefix+":", log.LstdFlags)
return func(msg string, args ...interface{}) {
_, file1, pos1, _ := runtime.Caller(1)
@@ -37,5 +37,5 @@ func GetLogger(prefix string, debug bool) func(string, ...interface{}) {
}
}
- return func(msg string, args ...interface{}) {}
+ return func(_ string, _ ...interface{}) {}
}
diff --git a/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go b/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go
index 26c2a05a310..c0f43e728a3 100644
--- a/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go
+++ b/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go
@@ -1,6 +1,7 @@
package replace
import (
+ "encoding/json"
"fmt"
"net/url"
"os"
@@ -40,6 +41,8 @@ func RewriteSchemaToRef(sp *spec.Swagger, key string, ref spec.Ref) error {
if refable.Schema != nil {
refable.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
}
+ case map[string]interface{}: // this happens e.g. if a schema points to an extension unmarshaled as map[string]interface{}
+ return rewriteParentRef(sp, key, ref)
default:
return fmt.Errorf("no schema with ref found at %s for %T", key, value)
}
@@ -120,6 +123,9 @@ func rewriteParentRef(sp *spec.Swagger, key string, ref spec.Ref) error {
case spec.SchemaProperties:
container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+ case *interface{}:
+ *container = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
// NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema
default:
@@ -318,8 +324,8 @@ type DeepestRefResult struct {
}
// DeepestRef finds the first definition ref, from a cascade of nested refs which are not definitions.
-// - if no definition is found, returns the deepest ref.
-// - pointers to external files are expanded
+// - if no definition is found, returns the deepest ref.
+// - pointers to external files are expanded
//
// NOTE: all external $ref's are assumed to be already expanded at this stage.
func DeepestRef(sp *spec.Swagger, opts *spec.ExpandOptions, ref spec.Ref) (*DeepestRefResult, error) {
@@ -385,8 +391,9 @@ DOWNREF:
err := asSchema.UnmarshalJSON(asJSON)
if err != nil {
return nil,
- fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T",
- currentRef.String(), value)
+ fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T (%v)",
+ currentRef.String(), value, err,
+ )
}
warnings = append(warnings, fmt.Sprintf("found $ref %q (response) interpreted as schema", currentRef.String()))
@@ -402,8 +409,9 @@ DOWNREF:
var asSchema spec.Schema
if err := asSchema.UnmarshalJSON(asJSON); err != nil {
return nil,
- fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T",
- currentRef.String(), value)
+ fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T (%v)",
+ currentRef.String(), value, err,
+ )
}
warnings = append(warnings, fmt.Sprintf("found $ref %q (parameter) interpreted as schema", currentRef.String()))
@@ -414,9 +422,25 @@ DOWNREF:
currentRef = asSchema.Ref
default:
- return nil,
- fmt.Errorf("unhandled type to resolve JSON pointer %s. Expected a Schema, got: %T",
- currentRef.String(), value)
+ // fallback: attempts to resolve the pointer as a schema
+ if refable == nil {
+ break DOWNREF
+ }
+
+ asJSON, _ := json.Marshal(refable)
+ var asSchema spec.Schema
+ if err := asSchema.UnmarshalJSON(asJSON); err != nil {
+ return nil,
+ fmt.Errorf("unhandled type to resolve JSON pointer %s. Expected a Schema, got: %T (%v)",
+ currentRef.String(), value, err,
+ )
+ }
+ warnings = append(warnings, fmt.Sprintf("found $ref %q (%T) interpreted as schema", currentRef.String(), refable))
+
+ if asSchema.Ref.String() == "" {
+ break DOWNREF
+ }
+ currentRef = asSchema.Ref
}
}
diff --git a/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go b/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go
index 18e552eadce..ac80fc2e832 100644
--- a/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go
+++ b/test/tools/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go
@@ -69,7 +69,7 @@ func KeyParts(key string) SplitKey {
return res
}
-// SplitKey holds of the parts of a /-separated key, soi that their location may be determined.
+// SplitKey holds of the parts of a /-separated key, so that their location may be determined.
type SplitKey []string
// IsDefinition is true when the split key is in the #/definitions section of a spec
diff --git a/test/tools/vendor/github.com/go-openapi/analysis/mixin.go b/test/tools/vendor/github.com/go-openapi/analysis/mixin.go
index b253052648c..7785a29b27d 100644
--- a/test/tools/vendor/github.com/go-openapi/analysis/mixin.go
+++ b/test/tools/vendor/github.com/go-openapi/analysis/mixin.go
@@ -53,7 +53,7 @@ import (
// collisions.
func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string {
skipped := make([]string, 0, len(mixins))
- opIds := getOpIds(primary)
+ opIDs := getOpIDs(primary)
initPrimary(primary)
for i, m := range mixins {
@@ -74,7 +74,7 @@ func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string {
skipped = append(skipped, mergeDefinitions(primary, m)...)
// merging paths requires a map of operationIDs to work with
- skipped = append(skipped, mergePaths(primary, m, opIds, i)...)
+ skipped = append(skipped, mergePaths(primary, m, opIDs, i)...)
skipped = append(skipped, mergeParameters(primary, m)...)
@@ -84,9 +84,9 @@ func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string {
return skipped
}
-// getOpIds extracts all the paths..operationIds from the given
+// getOpIDs extracts all the paths..operationIds from the given
// spec and returns them as the keys in a map with 'true' values.
-func getOpIds(s *spec.Swagger) map[string]bool {
+func getOpIDs(s *spec.Swagger) map[string]bool {
rv := make(map[string]bool)
if s.Paths == nil {
return rv
@@ -179,7 +179,7 @@ func mergeDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string)
return
}
-func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIds map[string]bool, mixIndex int) (skipped []string) {
+func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIDs map[string]bool, mixIndex int) (skipped []string) {
if m.Paths != nil {
for k, v := range m.Paths.Paths {
if _, exists := primary.Paths.Paths[k]; exists {
@@ -198,10 +198,10 @@ func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIds map[string]bool, m
// all the proivded specs are already unique.
piops := pathItemOps(v)
for _, piop := range piops {
- if opIds[piop.ID] {
+ if opIDs[piop.ID] {
piop.ID = fmt.Sprintf("%v%v%v", piop.ID, "Mixin", mixIndex)
}
- opIds[piop.ID] = true
+ opIDs[piop.ID] = true
}
primary.Paths.Paths[k] = v
}
@@ -367,7 +367,7 @@ func mergeSwaggerProps(primary *spec.Swagger, m *spec.Swagger) []string {
return skipped
}
-// nolint: unparam
+//nolint:unparam
func mergeExternalDocs(primary *spec.ExternalDocumentation, m *spec.ExternalDocumentation) []string {
if primary.Description == "" {
primary.Description = m.Description
diff --git a/test/tools/vendor/github.com/go-openapi/analysis/schema.go b/test/tools/vendor/github.com/go-openapi/analysis/schema.go
index fc055095cbb..ab190db5b78 100644
--- a/test/tools/vendor/github.com/go-openapi/analysis/schema.go
+++ b/test/tools/vendor/github.com/go-openapi/analysis/schema.go
@@ -1,7 +1,7 @@
package analysis
import (
- "fmt"
+ "errors"
"github.com/go-openapi/spec"
"github.com/go-openapi/strfmt"
@@ -19,7 +19,7 @@ type SchemaOpts struct {
// patterns.
func Schema(opts SchemaOpts) (*AnalyzedSchema, error) {
if opts.Schema == nil {
- return nil, fmt.Errorf("no schema to analyze")
+ return nil, errors.New("no schema to analyze")
}
a := &AnalyzedSchema{
@@ -247,10 +247,10 @@ func (a *AnalyzedSchema) isArrayType() bool {
// isAnalyzedAsComplex determines if an analyzed schema is eligible to flattening (i.e. it is "complex").
//
// Complex means the schema is any of:
-// - a simple type (primitive)
-// - an array of something (items are possibly complex ; if this is the case, items will generate a definition)
-// - a map of something (additionalProperties are possibly complex ; if this is the case, additionalProperties will
-// generate a definition)
+// - a simple type (primitive)
+// - an array of something (items are possibly complex ; if this is the case, items will generate a definition)
+// - a map of something (additionalProperties are possibly complex ; if this is the case, additionalProperties will
+// generate a definition)
func (a *AnalyzedSchema) isAnalyzedAsComplex() bool {
return !a.IsSimpleSchema && !a.IsArray && !a.IsMap
}
diff --git a/test/tools/vendor/github.com/go-openapi/errors/.golangci.yml b/test/tools/vendor/github.com/go-openapi/errors/.golangci.yml
index 4e1fc0c7d48..cf88ead324d 100644
--- a/test/tools/vendor/github.com/go-openapi/errors/.golangci.yml
+++ b/test/tools/vendor/github.com/go-openapi/errors/.golangci.yml
@@ -4,45 +4,59 @@ linters-settings:
golint:
min-confidence: 0
gocyclo:
- min-complexity: 30
+ min-complexity: 45
maligned:
suggest-new: true
dupl:
- threshold: 100
+ threshold: 200
goconst:
min-len: 2
- min-occurrences: 4
+ min-occurrences: 3
+
linters:
enable-all: true
disable:
+ - errname # this repo doesn't follow the convention advised by this linter
- maligned
+ - unparam
- lll
+ - gochecknoinits
- gochecknoglobals
+ - funlen
- godox
- gocognit
- whitespace
- wsl
- - funlen
- - gochecknoglobals
- - gochecknoinits
- - scopelint
- wrapcheck
- - exhaustivestruct
- - exhaustive
- - nlreturn
- testpackage
- - gci
- - gofumpt
- - goerr113
+ - nlreturn
- gomnd
- - tparallel
+ - exhaustivestruct
+ - goerr113
+ - errorlint
- nestif
- godot
- - errorlint
+ - gofumpt
- paralleltest
- tparallel
- - cyclop
- - errname
- - varnamelen
+ - thelper
+ - ifshort
- exhaustruct
- - maintidx
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/test/tools/vendor/github.com/go-openapi/errors/README.md b/test/tools/vendor/github.com/go-openapi/errors/README.md
index 4aac049e608..6d57ea55c7c 100644
--- a/test/tools/vendor/github.com/go-openapi/errors/README.md
+++ b/test/tools/vendor/github.com/go-openapi/errors/README.md
@@ -1,11 +1,8 @@
-# OpenAPI errors
+# OpenAPI errors [](https://github.com/go-openapi/errors/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/errors)
-[](https://travis-ci.org/go-openapi/errors)
-[](https://codecov.io/gh/go-openapi/errors)
[](https://slackin.goswagger.io)
[](https://raw.githubusercontent.com/go-openapi/errors/master/LICENSE)
[](https://pkg.go.dev/github.com/go-openapi/errors)
-[](https://golangci.com)
[](https://goreportcard.com/report/github.com/go-openapi/errors)
Shared errors and error interface used throughout the various libraries found in the go-openapi toolkit.
diff --git a/test/tools/vendor/github.com/go-openapi/errors/api.go b/test/tools/vendor/github.com/go-openapi/errors/api.go
index c13f3435fa8..5320cb96304 100644
--- a/test/tools/vendor/github.com/go-openapi/errors/api.go
+++ b/test/tools/vendor/github.com/go-openapi/errors/api.go
@@ -55,9 +55,15 @@ func (a apiError) MarshalJSON() ([]byte, error) {
// New creates a new API error with a code and a message
func New(code int32, message string, args ...interface{}) Error {
if len(args) > 0 {
- return &apiError{code, fmt.Sprintf(message, args...)}
+ return &apiError{
+ code: code,
+ message: fmt.Sprintf(message, args...),
+ }
+ }
+ return &apiError{
+ code: code,
+ message: message,
}
- return &apiError{code, message}
}
// NotFound creates a new not found error
@@ -130,10 +136,14 @@ func flattenComposite(errs *CompositeError) *CompositeError {
// MethodNotAllowed creates a new method not allowed error
func MethodNotAllowed(requested string, allow []string) Error {
msg := fmt.Sprintf("method %s is not allowed, but [%s] are", requested, strings.Join(allow, ","))
- return &MethodNotAllowedError{code: http.StatusMethodNotAllowed, Allowed: allow, message: msg}
+ return &MethodNotAllowedError{
+ code: http.StatusMethodNotAllowed,
+ Allowed: allow,
+ message: msg,
+ }
}
-// ServeError the error handler interface implementation
+// ServeError implements the http error handler interface
func ServeError(rw http.ResponseWriter, r *http.Request, err error) {
rw.Header().Set("Content-Type", "application/json")
switch e := err.(type) {
diff --git a/test/tools/vendor/github.com/go-openapi/errors/schema.go b/test/tools/vendor/github.com/go-openapi/errors/schema.go
index da5f6c78cb5..cf7ac2ed4da 100644
--- a/test/tools/vendor/github.com/go-openapi/errors/schema.go
+++ b/test/tools/vendor/github.com/go-openapi/errors/schema.go
@@ -120,6 +120,10 @@ func (c *CompositeError) Error() string {
return c.message
}
+func (c *CompositeError) Unwrap() []error {
+ return c.Errors
+}
+
// MarshalJSON implements the JSON encoding interface
func (c CompositeError) MarshalJSON() ([]byte, error) {
return json.Marshal(map[string]interface{}{
@@ -133,7 +137,7 @@ func (c CompositeError) MarshalJSON() ([]byte, error) {
func CompositeValidationError(errors ...error) *CompositeError {
return &CompositeError{
code: CompositeErrorCode,
- Errors: append([]error{}, errors...),
+ Errors: append(make([]error, 0, len(errors)), errors...),
message: "validation failure list",
}
}
diff --git a/test/tools/vendor/github.com/go-openapi/inflect/.gitignore b/test/tools/vendor/github.com/go-openapi/inflect/.gitignore
new file mode 100644
index 00000000000..87c3bd3e66e
--- /dev/null
+++ b/test/tools/vendor/github.com/go-openapi/inflect/.gitignore
@@ -0,0 +1,5 @@
+secrets.yml
+coverage.out
+coverage.txt
+*.cov
+.idea
diff --git a/test/tools/vendor/github.com/go-openapi/inflect/.golangci.yml b/test/tools/vendor/github.com/go-openapi/inflect/.golangci.yml
new file mode 100644
index 00000000000..22f8d21cca1
--- /dev/null
+++ b/test/tools/vendor/github.com/go-openapi/inflect/.golangci.yml
@@ -0,0 +1,61 @@
+linters-settings:
+ govet:
+ check-shadowing: true
+ golint:
+ min-confidence: 0
+ gocyclo:
+ min-complexity: 45
+ maligned:
+ suggest-new: true
+ dupl:
+ threshold: 200
+ goconst:
+ min-len: 2
+ min-occurrences: 3
+
+linters:
+ enable-all: true
+ disable:
+ - maligned
+ - unparam
+ - lll
+ - gochecknoinits
+ - gochecknoglobals
+ - funlen
+ - godox
+ - gocognit
+ - whitespace
+ - wsl
+ - wrapcheck
+ - testpackage
+ - nlreturn
+ - gomnd
+ - exhaustivestruct
+ - goerr113
+ - errorlint
+ - nestif
+ - godot
+ - gofumpt
+ - paralleltest
+ - tparallel
+ - thelper
+ - ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/test/tools/vendor/github.com/go-openapi/inflect/.hgignore b/test/tools/vendor/github.com/go-openapi/inflect/.hgignore
deleted file mode 100644
index 6cc3d7ce113..00000000000
--- a/test/tools/vendor/github.com/go-openapi/inflect/.hgignore
+++ /dev/null
@@ -1 +0,0 @@
-swp$
diff --git a/test/tools/vendor/github.com/go-openapi/inflect/LICENCE b/test/tools/vendor/github.com/go-openapi/inflect/LICENSE
similarity index 100%
rename from test/tools/vendor/github.com/go-openapi/inflect/LICENCE
rename to test/tools/vendor/github.com/go-openapi/inflect/LICENSE
diff --git a/test/tools/vendor/github.com/go-openapi/inflect/README b/test/tools/vendor/github.com/go-openapi/inflect/README
deleted file mode 100644
index 014699a2225..00000000000
--- a/test/tools/vendor/github.com/go-openapi/inflect/README
+++ /dev/null
@@ -1,168 +0,0 @@
-INSTALLATION
-
-go get bitbucket.org/pkg/inflect
-
-PACKAGE
-
-package inflect
-
-
-FUNCTIONS
-
-func AddAcronym(word string)
-
-func AddHuman(suffix, replacement string)
-
-func AddIrregular(singular, plural string)
-
-func AddPlural(suffix, replacement string)
-
-func AddSingular(suffix, replacement string)
-
-func AddUncountable(word string)
-
-func Asciify(word string) string
-
-func Camelize(word string) string
-
-func CamelizeDownFirst(word string) string
-
-func Capitalize(word string) string
-
-func Dasherize(word string) string
-
-func ForeignKey(word string) string
-
-func ForeignKeyCondensed(word string) string
-
-func Humanize(word string) string
-
-func Ordinalize(word string) string
-
-func Parameterize(word string) string
-
-func ParameterizeJoin(word, sep string) string
-
-func Pluralize(word string) string
-
-func Singularize(word string) string
-
-func Tableize(word string) string
-
-func Titleize(word string) string
-
-func Typeify(word string) string
-
-func Uncountables() map[string]bool
-
-func Underscore(word string) string
-
-
-TYPES
-
-type Rule struct {
- // contains filtered or unexported fields
-}
-used by rulesets
-
-type Ruleset struct {
- // contains filtered or unexported fields
-}
-a Ruleset is the config of pluralization rules
-you can extend the rules with the Add* methods
-
-func NewDefaultRuleset() *Ruleset
-create a new ruleset and load it with the default
-set of common English pluralization rules
-
-func NewRuleset() *Ruleset
-create a blank ruleset. Unless you are going to
-build your own rules from scratch you probably
-won't need this and can just use the defaultRuleset
-via the global inflect.* methods
-
-func (rs *Ruleset) AddAcronym(word string)
-if you use acronym you may need to add them to the ruleset
-to prevent Underscored words of things like "HTML" coming out
-as "h_t_m_l"
-
-func (rs *Ruleset) AddHuman(suffix, replacement string)
-Human rules are applied by humanize to show more friendly
-versions of words
-
-func (rs *Ruleset) AddIrregular(singular, plural string)
-Add any inconsistant pluralizing/sinularizing rules
-to the set here.
-
-func (rs *Ruleset) AddPlural(suffix, replacement string)
-add a pluralization rule
-
-func (rs *Ruleset) AddPluralExact(suffix, replacement string, exact bool)
-add a pluralization rule with full string match
-
-func (rs *Ruleset) AddSingular(suffix, replacement string)
-add a singular rule
-
-func (rs *Ruleset) AddSingularExact(suffix, replacement string, exact bool)
-same as AddSingular but you can set `exact` to force
-a full string match
-
-func (rs *Ruleset) AddUncountable(word string)
-add a word to this ruleset that has the same singular and plural form
-for example: "rice"
-
-func (rs *Ruleset) Asciify(word string) string
-transforms latin characters like é -> e
-
-func (rs *Ruleset) Camelize(word string) string
-"dino_party" -> "DinoParty"
-
-func (rs *Ruleset) CamelizeDownFirst(word string) string
-same as Camelcase but with first letter downcased
-
-func (rs *Ruleset) Capitalize(word string) string
-uppercase first character
-
-func (rs *Ruleset) Dasherize(word string) string
-"SomeText" -> "some-text"
-
-func (rs *Ruleset) ForeignKey(word string) string
-an underscored foreign key name "Person" -> "person_id"
-
-func (rs *Ruleset) ForeignKeyCondensed(word string) string
-a foreign key (with an underscore) "Person" -> "personid"
-
-func (rs *Ruleset) Humanize(word string) string
-First letter of sentance captitilized
-Uses custom friendly replacements via AddHuman()
-
-func (rs *Ruleset) Ordinalize(str string) string
-"1031" -> "1031st"
-
-func (rs *Ruleset) Parameterize(word string) string
-param safe dasherized names like "my-param"
-
-func (rs *Ruleset) ParameterizeJoin(word, sep string) string
-param safe dasherized names with custom seperator
-
-func (rs *Ruleset) Pluralize(word string) string
-returns the plural form of a singular word
-
-func (rs *Ruleset) Singularize(word string) string
-returns the singular form of a plural word
-
-func (rs *Ruleset) Tableize(word string) string
-Rails style pluralized table names: "SuperPerson" -> "super_people"
-
-func (rs *Ruleset) Titleize(word string) string
-Captitilize every word in sentance "hello there" -> "Hello There"
-
-func (rs *Ruleset) Typeify(word string) string
-"something_like_this" -> "SomethingLikeThis"
-
-func (rs *Ruleset) Uncountables() map[string]bool
-
-func (rs *Ruleset) Underscore(word string) string
-lowercase underscore version "BigBen" -> "big_ben"
-
-
diff --git a/test/tools/vendor/github.com/go-openapi/inflect/README.md b/test/tools/vendor/github.com/go-openapi/inflect/README.md
new file mode 100644
index 00000000000..187b23b93ff
--- /dev/null
+++ b/test/tools/vendor/github.com/go-openapi/inflect/README.md
@@ -0,0 +1,18 @@
+# inflect [](https://github.com/go-openapi/inflect/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/inflect)
+
+[](https://slackin.goswagger.io)
+[](https://raw.githubusercontent.com/go-openapi/inflect/master/LICENSE)
+[](https://pkg.go.dev/github.com/go-openapi/inflect)
+[](https://goreportcard.com/report/github.com/go-openapi/inflect)
+
+A package to pluralize words.
+
+Originally forked from https://bitbucket.org/pkg/inflect under a MIT License.
+
+A golang library applying grammar rules to English words.
+
+> This package provides a basic set of functions applying
+> grammar rules to inflect English words, modify case style
+> (Capitalize, camelCase, snake_case, etc.).
+>
+> Acronyms are properly handled. A common use case is word pluralization.
diff --git a/test/tools/vendor/github.com/go-openapi/inflect/inflect.go b/test/tools/vendor/github.com/go-openapi/inflect/inflect.go
index 3008844caf9..9d8ca6dbd41 100644
--- a/test/tools/vendor/github.com/go-openapi/inflect/inflect.go
+++ b/test/tools/vendor/github.com/go-openapi/inflect/inflect.go
@@ -19,12 +19,11 @@ type Rule struct {
// a Ruleset is the config of pluralization rules
// you can extend the rules with the Add* methods
type Ruleset struct {
- uncountables map[string]bool
- plurals []*Rule
- singulars []*Rule
- humans []*Rule
- acronyms []*Rule
- acronymMatcher *regexp.Regexp
+ uncountables map[string]bool
+ plurals []*Rule
+ singulars []*Rule
+ humans []*Rule
+ acronyms []*Rule
}
// create a blank ruleset. Unless you are going to
@@ -282,7 +281,7 @@ func (rs *Ruleset) AddHuman(suffix, replacement string) {
rs.humans = append([]*Rule{r}, rs.humans...)
}
-// Add any inconsistant pluralizing/sinularizing rules
+// Add any inconsistent pluralizing/singularizing rules
// to the set here.
func (rs *Ruleset) AddIrregular(singular, plural string) {
delete(rs.uncountables, singular)
@@ -387,7 +386,7 @@ func (rs *Ruleset) Titleize(word string) string {
func (rs *Ruleset) safeCaseAcronyms(word string) string {
// convert an acroymn like HTML into Html
for _, rule := range rs.acronyms {
- word = strings.Replace(word, rule.suffix, rule.replacement, -1)
+ word = strings.ReplaceAll(word, rule.suffix, rule.replacement)
}
return word
}
@@ -409,7 +408,7 @@ func (rs *Ruleset) Humanize(word string) string {
word = replaceLast(word, "_id", "") // strip foreign key kinds
// replace and strings in humans list
for _, rule := range rs.humans {
- word = strings.Replace(word, rule.suffix, rule.replacement, -1)
+ word = strings.ReplaceAll(word, rule.suffix, rule.replacement)
}
sentance := rs.seperatedWords(word, " ")
return strings.ToUpper(sentance[:1]) + sentance[1:]
@@ -430,19 +429,19 @@ func (rs *Ruleset) Tableize(word string) string {
return rs.Pluralize(rs.Underscore(rs.Typeify(word)))
}
-var notUrlSafe *regexp.Regexp = regexp.MustCompile(`[^\w\d\-_ ]`)
+var notURLSafe = regexp.MustCompile(`[^\w\d\-_ ]`)
// param safe dasherized names like "my-param"
func (rs *Ruleset) Parameterize(word string) string {
return ParameterizeJoin(word, "-")
}
-// param safe dasherized names with custom seperator
+// param safe dasherized names with custom separator
func (rs *Ruleset) ParameterizeJoin(word, sep string) string {
word = strings.ToLower(word)
word = rs.Asciify(word)
- word = notUrlSafe.ReplaceAllString(word, "")
- word = strings.Replace(word, " ", sep, -1)
+ word = notURLSafe.ReplaceAllString(word, "")
+ word = strings.ReplaceAll(word, " ", sep)
if len(sep) > 0 {
squash, err := regexp.Compile(sep + "+")
if err == nil {
@@ -453,7 +452,7 @@ func (rs *Ruleset) ParameterizeJoin(word, sep string) string {
return word
}
-var lookalikes map[string]*regexp.Regexp = map[string]*regexp.Regexp{
+var lookalikes = map[string]*regexp.Regexp{
"A": regexp.MustCompile(`À|Á|Â|Ã|Ä|Å`),
"AE": regexp.MustCompile(`Æ`),
"C": regexp.MustCompile(`Ç`),
@@ -487,7 +486,7 @@ func (rs *Ruleset) Asciify(word string) string {
return word
}
-var tablePrefix *regexp.Regexp = regexp.MustCompile(`^[^.]*\.`)
+var tablePrefix = regexp.MustCompile(`^[^.]*\.`)
// "something_like_this" -> "SomethingLikeThis"
func (rs *Ruleset) Typeify(word string) string {
@@ -642,13 +641,13 @@ func reverse(s string) string {
func isSpacerChar(c rune) bool {
switch {
- case c == rune("_"[0]):
+ case c == '_':
return true
- case c == rune(" "[0]):
+ case c == ':':
return true
- case c == rune(":"[0]):
+ case c == '-':
return true
- case c == rune("-"[0]):
+ case unicode.IsSpace(c):
return true
}
return false
diff --git a/test/tools/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/test/tools/vendor/github.com/go-openapi/jsonpointer/.golangci.yml
new file mode 100644
index 00000000000..22f8d21cca1
--- /dev/null
+++ b/test/tools/vendor/github.com/go-openapi/jsonpointer/.golangci.yml
@@ -0,0 +1,61 @@
+linters-settings:
+ govet:
+ check-shadowing: true
+ golint:
+ min-confidence: 0
+ gocyclo:
+ min-complexity: 45
+ maligned:
+ suggest-new: true
+ dupl:
+ threshold: 200
+ goconst:
+ min-len: 2
+ min-occurrences: 3
+
+linters:
+ enable-all: true
+ disable:
+ - maligned
+ - unparam
+ - lll
+ - gochecknoinits
+ - gochecknoglobals
+ - funlen
+ - godox
+ - gocognit
+ - whitespace
+ - wsl
+ - wrapcheck
+ - testpackage
+ - nlreturn
+ - gomnd
+ - exhaustivestruct
+ - goerr113
+ - errorlint
+ - nestif
+ - godot
+ - gofumpt
+ - paralleltest
+ - tparallel
+ - thelper
+ - ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/test/tools/vendor/github.com/go-openapi/jsonpointer/README.md b/test/tools/vendor/github.com/go-openapi/jsonpointer/README.md
index 813788aff1c..0108f1d572d 100644
--- a/test/tools/vendor/github.com/go-openapi/jsonpointer/README.md
+++ b/test/tools/vendor/github.com/go-openapi/jsonpointer/README.md
@@ -1,6 +1,10 @@
-# gojsonpointer [](https://travis-ci.org/go-openapi/jsonpointer) [](https://codecov.io/gh/go-openapi/jsonpointer) [](https://slackin.goswagger.io)
+# gojsonpointer [](https://github.com/go-openapi/jsonpointer/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/jsonpointer)
+
+[](https://slackin.goswagger.io)
+[](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE)
+[](https://pkg.go.dev/github.com/go-openapi/jsonpointer)
+[](https://goreportcard.com/report/github.com/go-openapi/jsonpointer)
-[](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [](http://godoc.org/github.com/go-openapi/jsonpointer)
An implementation of JSON Pointer - Go language
## Status
diff --git a/test/tools/vendor/github.com/go-openapi/jsonpointer/pointer.go b/test/tools/vendor/github.com/go-openapi/jsonpointer/pointer.go
index 7df9853def6..d970c7cf448 100644
--- a/test/tools/vendor/github.com/go-openapi/jsonpointer/pointer.go
+++ b/test/tools/vendor/github.com/go-openapi/jsonpointer/pointer.go
@@ -26,6 +26,7 @@
package jsonpointer
import (
+ "encoding/json"
"errors"
"fmt"
"reflect"
@@ -40,6 +41,7 @@ const (
pointerSeparator = `/`
invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator
+ notFound = `Can't find the pointer in the document`
)
var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem()
@@ -48,13 +50,13 @@ var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem()
// JSONPointable is an interface for structs to implement when they need to customize the
// json pointer process
type JSONPointable interface {
- JSONLookup(string) (interface{}, error)
+ JSONLookup(string) (any, error)
}
// JSONSetable is an interface for structs to implement when they need to customize the
// json pointer process
type JSONSetable interface {
- JSONSet(string, interface{}) error
+ JSONSet(string, any) error
}
// New creates a new json pointer for the given string
@@ -81,9 +83,7 @@ func (p *Pointer) parse(jsonPointerString string) error {
err = errors.New(invalidStart)
} else {
referenceTokens := strings.Split(jsonPointerString, pointerSeparator)
- for _, referenceToken := range referenceTokens[1:] {
- p.referenceTokens = append(p.referenceTokens, referenceToken)
- }
+ p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...)
}
}
@@ -91,38 +91,58 @@ func (p *Pointer) parse(jsonPointerString string) error {
}
// Get uses the pointer to retrieve a value from a JSON document
-func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) {
+func (p *Pointer) Get(document any) (any, reflect.Kind, error) {
return p.get(document, swag.DefaultJSONNameProvider)
}
// Set uses the pointer to set a value from a JSON document
-func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) {
+func (p *Pointer) Set(document any, value any) (any, error) {
return document, p.set(document, value, swag.DefaultJSONNameProvider)
}
// GetForToken gets a value for a json pointer token 1 level deep
-func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) {
+func GetForToken(document any, decodedToken string) (any, reflect.Kind, error) {
return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider)
}
// SetForToken gets a value for a json pointer token 1 level deep
-func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) {
+func SetForToken(document any, decodedToken string, value any) (any, error) {
return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider)
}
-func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
+func isNil(input any) bool {
+ if input == nil {
+ return true
+ }
+
+ kind := reflect.TypeOf(input).Kind()
+ switch kind { //nolint:exhaustive
+ case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan:
+ return reflect.ValueOf(input).IsNil()
+ default:
+ return false
+ }
+}
+
+func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, error) {
rValue := reflect.Indirect(reflect.ValueOf(node))
kind := rValue.Kind()
+ if isNil(node) {
+ return nil, kind, fmt.Errorf("nil value has not field %q", decodedToken)
+ }
- if rValue.Type().Implements(jsonPointableType) {
- r, err := node.(JSONPointable).JSONLookup(decodedToken)
+ switch typed := node.(type) {
+ case JSONPointable:
+ r, err := typed.JSONLookup(decodedToken)
if err != nil {
return nil, kind, err
}
return r, kind, nil
+ case *any: // case of a pointer to interface, that is not resolved by reflect.Indirect
+ return getSingleImpl(*typed, decodedToken, nameProvider)
}
- switch kind {
+ switch kind { //nolint:exhaustive
case reflect.Struct:
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
if !ok {
@@ -159,7 +179,7 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam
}
-func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error {
+func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameProvider) error {
rValue := reflect.Indirect(reflect.ValueOf(node))
if ns, ok := node.(JSONSetable); ok { // pointer impl
@@ -170,7 +190,7 @@ func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *sw
return node.(JSONSetable).JSONSet(decodedToken, data)
}
- switch rValue.Kind() {
+ switch rValue.Kind() { //nolint:exhaustive
case reflect.Struct:
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
if !ok {
@@ -210,7 +230,7 @@ func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *sw
}
-func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
+func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.Kind, error) {
if nameProvider == nil {
nameProvider = swag.DefaultJSONNameProvider
@@ -231,8 +251,7 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf
if err != nil {
return nil, knd, err
}
- node, kind = r, knd
-
+ node = r
}
rValue := reflect.ValueOf(node)
@@ -241,11 +260,11 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf
return node, kind, nil
}
-func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error {
+func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
knd := reflect.ValueOf(node).Kind()
if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array {
- return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values")
+ return errors.New("only structs, pointers, maps and slices are supported for setting values")
}
if nameProvider == nil {
@@ -284,7 +303,7 @@ func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) e
continue
}
- switch kind {
+ switch kind { //nolint:exhaustive
case reflect.Struct:
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
if !ok {
@@ -363,6 +382,128 @@ func (p *Pointer) String() string {
return pointerString
}
+func (p *Pointer) Offset(document string) (int64, error) {
+ dec := json.NewDecoder(strings.NewReader(document))
+ var offset int64
+ for _, ttk := range p.DecodedTokens() {
+ tk, err := dec.Token()
+ if err != nil {
+ return 0, err
+ }
+ switch tk := tk.(type) {
+ case json.Delim:
+ switch tk {
+ case '{':
+ offset, err = offsetSingleObject(dec, ttk)
+ if err != nil {
+ return 0, err
+ }
+ case '[':
+ offset, err = offsetSingleArray(dec, ttk)
+ if err != nil {
+ return 0, err
+ }
+ default:
+ return 0, fmt.Errorf("invalid token %#v", tk)
+ }
+ default:
+ return 0, fmt.Errorf("invalid token %#v", tk)
+ }
+ }
+ return offset, nil
+}
+
+func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) {
+ for dec.More() {
+ offset := dec.InputOffset()
+ tk, err := dec.Token()
+ if err != nil {
+ return 0, err
+ }
+ switch tk := tk.(type) {
+ case json.Delim:
+ switch tk {
+ case '{':
+ if err = drainSingle(dec); err != nil {
+ return 0, err
+ }
+ case '[':
+ if err = drainSingle(dec); err != nil {
+ return 0, err
+ }
+ }
+ case string:
+ if tk == decodedToken {
+ return offset, nil
+ }
+ default:
+ return 0, fmt.Errorf("invalid token %#v", tk)
+ }
+ }
+ return 0, fmt.Errorf("token reference %q not found", decodedToken)
+}
+
+func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) {
+ idx, err := strconv.Atoi(decodedToken)
+ if err != nil {
+ return 0, fmt.Errorf("token reference %q is not a number: %v", decodedToken, err)
+ }
+ var i int
+ for i = 0; i < idx && dec.More(); i++ {
+ tk, err := dec.Token()
+ if err != nil {
+ return 0, err
+ }
+
+ if delim, isDelim := tk.(json.Delim); isDelim {
+ switch delim {
+ case '{':
+ if err = drainSingle(dec); err != nil {
+ return 0, err
+ }
+ case '[':
+ if err = drainSingle(dec); err != nil {
+ return 0, err
+ }
+ }
+ }
+ }
+
+ if !dec.More() {
+ return 0, fmt.Errorf("token reference %q not found", decodedToken)
+ }
+ return dec.InputOffset(), nil
+}
+
+// drainSingle drains a single level of object or array.
+// The decoder has to guarantee the beginning delim (i.e. '{' or '[') has been consumed.
+func drainSingle(dec *json.Decoder) error {
+ for dec.More() {
+ tk, err := dec.Token()
+ if err != nil {
+ return err
+ }
+ if delim, isDelim := tk.(json.Delim); isDelim {
+ switch delim {
+ case '{':
+ if err = drainSingle(dec); err != nil {
+ return err
+ }
+ case '[':
+ if err = drainSingle(dec); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ // Consumes the ending delim
+ if _, err := dec.Token(); err != nil {
+ return err
+ }
+ return nil
+}
+
// Specific JSON pointer encoding here
// ~0 => ~
// ~1 => /
@@ -377,14 +518,14 @@ const (
// Unescape unescapes a json pointer reference token string to the original representation
func Unescape(token string) string {
- step1 := strings.Replace(token, encRefTok1, decRefTok1, -1)
- step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1)
+ step1 := strings.ReplaceAll(token, encRefTok1, decRefTok1)
+ step2 := strings.ReplaceAll(step1, encRefTok0, decRefTok0)
return step2
}
// Escape escapes a pointer reference token string
func Escape(token string) string {
- step1 := strings.Replace(token, decRefTok0, encRefTok0, -1)
- step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1)
+ step1 := strings.ReplaceAll(token, decRefTok0, encRefTok0)
+ step2 := strings.ReplaceAll(step1, decRefTok1, encRefTok1)
return step2
}
diff --git a/test/tools/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/test/tools/vendor/github.com/go-openapi/jsonreference/.golangci.yml
index 013fc1943a9..22f8d21cca1 100644
--- a/test/tools/vendor/github.com/go-openapi/jsonreference/.golangci.yml
+++ b/test/tools/vendor/github.com/go-openapi/jsonreference/.golangci.yml
@@ -1,50 +1,61 @@
linters-settings:
govet:
check-shadowing: true
+ golint:
+ min-confidence: 0
gocyclo:
- min-complexity: 30
+ min-complexity: 45
maligned:
suggest-new: true
dupl:
- threshold: 100
+ threshold: 200
goconst:
min-len: 2
- min-occurrences: 4
- paralleltest:
- ignore-missing: true
+ min-occurrences: 3
+
linters:
enable-all: true
disable:
- maligned
+ - unparam
- lll
+ - gochecknoinits
- gochecknoglobals
+ - funlen
- godox
- gocognit
- whitespace
- wsl
- - funlen
- - gochecknoglobals
- - gochecknoinits
- - scopelint
- wrapcheck
- - exhaustivestruct
- - exhaustive
- - nlreturn
- testpackage
- - gci
- - gofumpt
- - goerr113
+ - nlreturn
- gomnd
- - tparallel
+ - exhaustivestruct
+ - goerr113
+ - errorlint
- nestif
- godot
- - errorlint
- - varcheck
- - interfacer
- - deadcode
- - golint
+ - gofumpt
+ - paralleltest
+ - tparallel
+ - thelper
- ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
- structcheck
+ - golint
- nosnakecase
- - varnamelen
- - exhaustruct
diff --git a/test/tools/vendor/github.com/go-openapi/jsonreference/README.md b/test/tools/vendor/github.com/go-openapi/jsonreference/README.md
index b94753aa527..c7fc2049c1d 100644
--- a/test/tools/vendor/github.com/go-openapi/jsonreference/README.md
+++ b/test/tools/vendor/github.com/go-openapi/jsonreference/README.md
@@ -1,15 +1,19 @@
-# gojsonreference [](https://travis-ci.org/go-openapi/jsonreference) [](https://codecov.io/gh/go-openapi/jsonreference) [](https://slackin.goswagger.io)
+# gojsonreference [](https://github.com/go-openapi/jsonreference/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/jsonreference)
+
+[](https://slackin.goswagger.io)
+[](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE)
+[](https://pkg.go.dev/github.com/go-openapi/jsonreference)
+[](https://goreportcard.com/report/github.com/go-openapi/jsonreference)
-[](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [](http://godoc.org/github.com/go-openapi/jsonreference)
An implementation of JSON Reference - Go language
## Status
Feature complete. Stable API
## Dependencies
-https://github.com/go-openapi/jsonpointer
+* https://github.com/go-openapi/jsonpointer
## References
-http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
-http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03
+* http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
+* http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03
diff --git a/test/tools/vendor/github.com/go-openapi/loads/.golangci.yml b/test/tools/vendor/github.com/go-openapi/loads/.golangci.yml
index d48b4a5156e..22f8d21cca1 100644
--- a/test/tools/vendor/github.com/go-openapi/loads/.golangci.yml
+++ b/test/tools/vendor/github.com/go-openapi/loads/.golangci.yml
@@ -4,41 +4,58 @@ linters-settings:
golint:
min-confidence: 0
gocyclo:
- min-complexity: 30
+ min-complexity: 45
maligned:
suggest-new: true
dupl:
- threshold: 100
+ threshold: 200
goconst:
min-len: 2
- min-occurrences: 4
+ min-occurrences: 3
linters:
enable-all: true
disable:
- maligned
+ - unparam
- lll
- - gochecknoglobals
- gochecknoinits
+ - gochecknoglobals
+ - funlen
- godox
- gocognit
- whitespace
- wsl
- - funlen
- - gochecknoglobals
- - gochecknoinits
- - scopelint
- wrapcheck
- - exhaustivestruct
- - exhaustive
- - nlreturn
- testpackage
- - gci
- - gofumpt
- - goerr113
+ - nlreturn
- gomnd
- - tparallel
+ - exhaustivestruct
+ - goerr113
+ - errorlint
- nestif
- godot
- - errorlint
+ - gofumpt
- paralleltest
+ - tparallel
+ - thelper
+ - ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/test/tools/vendor/github.com/go-openapi/loads/README.md b/test/tools/vendor/github.com/go-openapi/loads/README.md
index df1f6264623..f8bd440dfc2 100644
--- a/test/tools/vendor/github.com/go-openapi/loads/README.md
+++ b/test/tools/vendor/github.com/go-openapi/loads/README.md
@@ -1,4 +1,4 @@
-# Loads OAI specs [](https://travis-ci.org/go-openapi/loads) [](https://codecov.io/gh/go-openapi/loads) [](https://slackin.goswagger.io) [](https://github.com/go-openapi/loads/actions?query=workflow%3A"Go+Test")
+# Loads OAI specs [](https://github.com/go-openapi/loads/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/loads)
[](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [](http://godoc.org/github.com/go-openapi/loads)
[](https://goreportcard.com/report/github.com/go-openapi/loads)
diff --git a/test/tools/vendor/github.com/go-openapi/loads/doc.go b/test/tools/vendor/github.com/go-openapi/loads/doc.go
index 3046da4cef3..5bcaef5dbcc 100644
--- a/test/tools/vendor/github.com/go-openapi/loads/doc.go
+++ b/test/tools/vendor/github.com/go-openapi/loads/doc.go
@@ -12,10 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-/*
-Package loads provides document loading methods for swagger (OAI) specifications.
-
-It is used by other go-openapi packages to load and run analysis on local or remote spec documents.
-
-*/
+// Package loads provides document loading methods for swagger (OAI) specifications.
+//
+// It is used by other go-openapi packages to load and run analysis on local or remote spec documents.
package loads
diff --git a/test/tools/vendor/github.com/go-openapi/loads/loaders.go b/test/tools/vendor/github.com/go-openapi/loads/loaders.go
index 44bd32b5b88..b2d1e034c52 100644
--- a/test/tools/vendor/github.com/go-openapi/loads/loaders.go
+++ b/test/tools/vendor/github.com/go-openapi/loads/loaders.go
@@ -21,7 +21,7 @@ var (
func init() {
jsonLoader := &loader{
DocLoaderWithMatch: DocLoaderWithMatch{
- Match: func(pth string) bool {
+ Match: func(_ string) bool {
return true
},
Fn: JSONDoc,
@@ -86,7 +86,7 @@ func (l *loader) Load(path string) (json.RawMessage, error) {
return nil, erp
}
- var lastErr error = errors.New("no loader matched") // default error if no match was found
+ lastErr := errors.New("no loader matched") // default error if no match was found
for ldr := l; ldr != nil; ldr = ldr.Next {
if ldr.Match != nil && !ldr.Match(path) {
continue
@@ -118,9 +118,8 @@ func JSONDoc(path string) (json.RawMessage, error) {
// This sets the configuration at the package level.
//
// NOTE:
-// * this updates the default loader used by github.com/go-openapi/spec
-// * since this sets package level globals, you shouln't call this concurrently
-//
+// - this updates the default loader used by github.com/go-openapi/spec
+// - since this sets package level globals, you shouldn't call this concurrently
func AddLoader(predicate DocMatcher, load DocLoader) {
loaders = loaders.WithHead(&loader{
DocLoaderWithMatch: DocLoaderWithMatch{
diff --git a/test/tools/vendor/github.com/go-openapi/loads/spec.go b/test/tools/vendor/github.com/go-openapi/loads/spec.go
index 93c8d4b8955..c9039cd5d7e 100644
--- a/test/tools/vendor/github.com/go-openapi/loads/spec.go
+++ b/test/tools/vendor/github.com/go-openapi/loads/spec.go
@@ -38,8 +38,8 @@ type Document struct {
specFilePath string
origSpec *spec.Swagger
schema *spec.Schema
- raw json.RawMessage
pathLoader *loader
+ raw json.RawMessage
}
// JSONSpec loads a spec from a json document
@@ -49,7 +49,14 @@ func JSONSpec(path string, options ...LoaderOption) (*Document, error) {
return nil, err
}
// convert to json
- return Analyzed(data, "", options...)
+ doc, err := Analyzed(data, "", options...)
+ if err != nil {
+ return nil, err
+ }
+
+ doc.specFilePath = path
+
+ return doc, nil
}
// Embedded returns a Document based on embedded specs. No analysis is required
@@ -71,7 +78,6 @@ func Embedded(orig, flat json.RawMessage, options ...LoaderOption) (*Document, e
// Spec loads a new spec document from a local or remote path
func Spec(path string, options ...LoaderOption) (*Document, error) {
-
ldr := loaderFromOptions(options)
b, err := ldr.Load(path)
@@ -84,12 +90,10 @@ func Spec(path string, options ...LoaderOption) (*Document, error) {
return nil, err
}
- if document != nil {
- document.specFilePath = path
- document.pathLoader = ldr
- }
+ document.specFilePath = path
+ document.pathLoader = ldr
- return document, err
+ return document, nil
}
// Analyzed creates a new analyzed spec document for a root json.RawMessage.
@@ -117,7 +121,7 @@ func Analyzed(data json.RawMessage, version string, options ...LoaderOption) (*D
}
d := &Document{
- Analyzer: analysis.New(swspec),
+ Analyzer: analysis.New(swspec), // NOTE: at this moment, analysis does not follow $refs to documents outside the root doc
schema: spec.MustLoadSwagger20Schema(),
spec: swspec,
raw: raw,
@@ -152,9 +156,8 @@ func trimData(in json.RawMessage) (json.RawMessage, error) {
return d, nil
}
-// Expanded expands the ref fields in the spec document and returns a new spec document
+// Expanded expands the $ref fields in the spec document and returns a new spec document
func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) {
-
swspec := new(spec.Swagger)
if err := json.Unmarshal(d.raw, swspec); err != nil {
return nil, err
@@ -163,6 +166,9 @@ func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) {
var expandOptions *spec.ExpandOptions
if len(options) > 0 {
expandOptions = options[0]
+ if expandOptions.RelativeBase == "" {
+ expandOptions.RelativeBase = d.specFilePath
+ }
} else {
expandOptions = &spec.ExpandOptions{
RelativeBase: d.specFilePath,
@@ -194,7 +200,7 @@ func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) {
return dd, nil
}
-// BasePath the base path for this spec
+// BasePath the base path for the API specified by this spec
func (d *Document) BasePath() string {
return d.spec.BasePath
}
@@ -242,8 +248,11 @@ func (d *Document) ResetDefinitions() *Document {
// Pristine creates a new pristine document instance based on the input data
func (d *Document) Pristine() *Document {
- dd, _ := Analyzed(d.Raw(), d.Version())
+ raw, _ := json.Marshal(d.Spec())
+ dd, _ := Analyzed(raw, d.Version())
dd.pathLoader = d.pathLoader
+ dd.specFilePath = d.specFilePath
+
return dd
}
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/.golangci.yml b/test/tools/vendor/github.com/go-openapi/runtime/.golangci.yml
index b1aa7928a7c..1c75557bac5 100644
--- a/test/tools/vendor/github.com/go-openapi/runtime/.golangci.yml
+++ b/test/tools/vendor/github.com/go-openapi/runtime/.golangci.yml
@@ -1,44 +1,62 @@
linters-settings:
govet:
- # Using err repeatedly considered as shadowing.
- check-shadowing: false
+ check-shadowing: true
golint:
min-confidence: 0
gocyclo:
- min-complexity: 30
+ min-complexity: 45
maligned:
suggest-new: true
dupl:
- threshold: 100
+ threshold: 200
goconst:
min-len: 2
- min-occurrences: 4
+ min-occurrences: 3
+
linters:
+ enable-all: true
disable:
+ - nilerr # nilerr crashes on this repo
- maligned
+ - unparam
- lll
+ - gochecknoinits
- gochecknoglobals
+ - funlen
- godox
- gocognit
- whitespace
- wsl
- - funlen
- - gochecknoglobals
- - gochecknoinits
- - scopelint
- wrapcheck
- - exhaustivestruct
- - exhaustive
- - nlreturn
- testpackage
- - gci
- - gofumpt
- - goerr113
+ - nlreturn
- gomnd
- - tparallel
+ - exhaustivestruct
+ - goerr113
+ - errorlint
- nestif
- godot
- - errorlint
- - noctx
+ - gofumpt
+ - paralleltest
+ - tparallel
+ - thelper
+ - ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
- interfacer
- - nilerr
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/README.md b/test/tools/vendor/github.com/go-openapi/runtime/README.md
index 5b1ec649454..b07e0ad9d62 100644
--- a/test/tools/vendor/github.com/go-openapi/runtime/README.md
+++ b/test/tools/vendor/github.com/go-openapi/runtime/README.md
@@ -1,7 +1,10 @@
-# runtime [](https://travis-ci.org/go-openapi/runtime) [](https://codecov.io/gh/go-openapi/runtime) [](https://slackin.goswagger.io)
+# runtime [](https://github.com/go-openapi/runtime/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/runtime)
-[](https://raw.githubusercontent.com/go-openapi/runtime/master/LICENSE) [](http://godoc.org/github.com/go-openapi/runtime)
+[](https://slackin.goswagger.io)
+[](https://raw.githubusercontent.com/go-openapi/runtime/master/LICENSE)
+[](https://pkg.go.dev/github.com/go-openapi/runtime)
+[](https://goreportcard.com/report/github.com/go-openapi/runtime)
-# golang Open-API toolkit - runtime
+# go OpenAPI toolkit runtime
-The runtime component for use in codegeneration or as untyped usage.
+The runtime component for use in code generation or as untyped usage.
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/bytestream.go b/test/tools/vendor/github.com/go-openapi/runtime/bytestream.go
index 6eb6ceb5c5d..f8fb482232b 100644
--- a/test/tools/vendor/github.com/go-openapi/runtime/bytestream.go
+++ b/test/tools/vendor/github.com/go-openapi/runtime/bytestream.go
@@ -38,9 +38,16 @@ type byteStreamOpts struct {
Close bool
}
-// ByteStreamConsumer creates a consumer for byte streams,
-// takes a Writer/BinaryUnmarshaler interface or binary slice by reference,
-// and reads from the provided reader
+// ByteStreamConsumer creates a consumer for byte streams.
+//
+// The consumer consumes from a provided reader into the data passed by reference.
+//
+// Supported output underlying types and interfaces, prioritized in this order:
+// - io.ReaderFrom (for maximum control)
+// - io.Writer (performs io.Copy)
+// - encoding.BinaryUnmarshaler
+// - *string
+// - *[]byte
func ByteStreamConsumer(opts ...byteStreamOpt) Consumer {
var vals byteStreamOpts
for _, opt := range opts {
@@ -51,44 +58,70 @@ func ByteStreamConsumer(opts ...byteStreamOpt) Consumer {
if reader == nil {
return errors.New("ByteStreamConsumer requires a reader") // early exit
}
+ if data == nil {
+ return errors.New("nil destination for ByteStreamConsumer")
+ }
- close := defaultCloser
+ closer := defaultCloser
if vals.Close {
- if cl, ok := reader.(io.Closer); ok {
- close = cl.Close
+ if cl, isReaderCloser := reader.(io.Closer); isReaderCloser {
+ closer = cl.Close
}
}
- //nolint:errcheck // closing a reader wouldn't fail.
- defer close()
+ defer func() {
+ _ = closer()
+ }()
- if wrtr, ok := data.(io.Writer); ok {
- _, err := io.Copy(wrtr, reader)
+ if readerFrom, isReaderFrom := data.(io.ReaderFrom); isReaderFrom {
+ _, err := readerFrom.ReadFrom(reader)
return err
}
- buf := new(bytes.Buffer)
+ if writer, isDataWriter := data.(io.Writer); isDataWriter {
+ _, err := io.Copy(writer, reader)
+ return err
+ }
+
+ // buffers input before writing to data
+ var buf bytes.Buffer
_, err := buf.ReadFrom(reader)
if err != nil {
return err
}
b := buf.Bytes()
- if bu, ok := data.(encoding.BinaryUnmarshaler); ok {
- return bu.UnmarshalBinary(b)
- }
+ switch destinationPointer := data.(type) {
+ case encoding.BinaryUnmarshaler:
+ return destinationPointer.UnmarshalBinary(b)
+ case *any:
+ switch (*destinationPointer).(type) {
+ case string:
+ *destinationPointer = string(b)
+
+ return nil
+
+ case []byte:
+ *destinationPointer = b
- if data != nil {
- if str, ok := data.(*string); ok {
- *str = string(b)
return nil
}
- }
+ default:
+ // check for the underlying type to be pointer to []byte or string,
+ if ptr := reflect.TypeOf(data); ptr.Kind() != reflect.Ptr {
+ return errors.New("destination must be a pointer")
+ }
- if t := reflect.TypeOf(data); data != nil && t.Kind() == reflect.Ptr {
v := reflect.Indirect(reflect.ValueOf(data))
- if t = v.Type(); t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 {
+ t := v.Type()
+
+ switch {
+ case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8:
v.SetBytes(b)
return nil
+
+ case t.Kind() == reflect.String:
+ v.SetString(string(b))
+ return nil
}
}
@@ -97,67 +130,87 @@ func ByteStreamConsumer(opts ...byteStreamOpt) Consumer {
})
}
-// ByteStreamProducer creates a producer for byte streams,
-// takes a Reader/BinaryMarshaler interface or binary slice,
-// and writes to a writer (essentially a pipe)
+// ByteStreamProducer creates a producer for byte streams.
+//
+// The producer takes input data then writes to an output writer (essentially as a pipe).
+//
+// Supported input underlying types and interfaces, prioritized in this order:
+// - io.WriterTo (for maximum control)
+// - io.Reader (performs io.Copy). A ReadCloser is closed before exiting.
+// - encoding.BinaryMarshaler
+// - error (writes as a string)
+// - []byte
+// - string
+// - struct, other slices: writes as JSON
func ByteStreamProducer(opts ...byteStreamOpt) Producer {
var vals byteStreamOpts
for _, opt := range opts {
opt(&vals)
}
+
return ProducerFunc(func(writer io.Writer, data interface{}) error {
if writer == nil {
return errors.New("ByteStreamProducer requires a writer") // early exit
}
- close := defaultCloser
+ if data == nil {
+ return errors.New("nil data for ByteStreamProducer")
+ }
+
+ closer := defaultCloser
if vals.Close {
- if cl, ok := writer.(io.Closer); ok {
- close = cl.Close
+ if cl, isWriterCloser := writer.(io.Closer); isWriterCloser {
+ closer = cl.Close
}
}
- //nolint:errcheck // TODO: closing a writer would fail.
- defer close()
+ defer func() {
+ _ = closer()
+ }()
- if rc, ok := data.(io.ReadCloser); ok {
+ if rc, isDataCloser := data.(io.ReadCloser); isDataCloser {
defer rc.Close()
}
- if rdr, ok := data.(io.Reader); ok {
- _, err := io.Copy(writer, rdr)
+ switch origin := data.(type) {
+ case io.WriterTo:
+ _, err := origin.WriteTo(writer)
+ return err
+
+ case io.Reader:
+ _, err := io.Copy(writer, origin)
return err
- }
- if bm, ok := data.(encoding.BinaryMarshaler); ok {
- bytes, err := bm.MarshalBinary()
+ case encoding.BinaryMarshaler:
+ bytes, err := origin.MarshalBinary()
if err != nil {
return err
}
_, err = writer.Write(bytes)
return err
- }
-
- if data != nil {
- if str, ok := data.(string); ok {
- _, err := writer.Write([]byte(str))
- return err
- }
- if e, ok := data.(error); ok {
- _, err := writer.Write([]byte(e.Error()))
- return err
- }
+ case error:
+ _, err := writer.Write([]byte(origin.Error()))
+ return err
+ default:
v := reflect.Indirect(reflect.ValueOf(data))
- if t := v.Type(); t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 {
+ t := v.Type()
+
+ switch {
+ case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8:
_, err := writer.Write(v.Bytes())
return err
- }
- if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice {
+
+ case t.Kind() == reflect.String:
+ _, err := writer.Write([]byte(v.String()))
+ return err
+
+ case t.Kind() == reflect.Struct || t.Kind() == reflect.Slice:
b, err := swag.WriteJSON(data)
if err != nil {
return err
}
+
_, err = writer.Write(b)
return err
}
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/client_operation.go b/test/tools/vendor/github.com/go-openapi/runtime/client_operation.go
index fa21eacf330..5a5d63563ad 100644
--- a/test/tools/vendor/github.com/go-openapi/runtime/client_operation.go
+++ b/test/tools/vendor/github.com/go-openapi/runtime/client_operation.go
@@ -30,12 +30,12 @@ type ClientOperation struct {
AuthInfo ClientAuthInfoWriter
Params ClientRequestWriter
Reader ClientResponseReader
- Context context.Context
+ Context context.Context //nolint:containedctx // we precisely want this type to contain the request context
Client *http.Client
}
// A ClientTransport implementor knows how to submit Request objects to some destination
type ClientTransport interface {
- //Submit(string, RequestWriter, ResponseReader, AuthInfoWriter) (interface{}, error)
+ // Submit(string, RequestWriter, ResponseReader, AuthInfoWriter) (interface{}, error)
Submit(*ClientOperation) (interface{}, error)
}
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/client_request.go b/test/tools/vendor/github.com/go-openapi/runtime/client_request.go
index d4d2b58f2bb..4ebb2deabe9 100644
--- a/test/tools/vendor/github.com/go-openapi/runtime/client_request.go
+++ b/test/tools/vendor/github.com/go-openapi/runtime/client_request.go
@@ -37,8 +37,8 @@ type ClientRequestWriter interface {
}
// ClientRequest is an interface for things that know how to
-// add information to a swagger client request
-type ClientRequest interface {
+// add information to a swagger client request.
+type ClientRequest interface { //nolint:interfacebloat // a swagger-capable request is quite rich, hence the many getter/setters
SetHeaderParam(string, ...string) error
GetHeaderParams() http.Header
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/csv.go b/test/tools/vendor/github.com/go-openapi/runtime/csv.go
index d807bd915b4..c9597bcd6e0 100644
--- a/test/tools/vendor/github.com/go-openapi/runtime/csv.go
+++ b/test/tools/vendor/github.com/go-openapi/runtime/csv.go
@@ -16,62 +16,335 @@ package runtime
import (
"bytes"
+ "context"
+ "encoding"
"encoding/csv"
"errors"
+ "fmt"
"io"
+ "reflect"
+
+ "golang.org/x/sync/errgroup"
)
-// CSVConsumer creates a new CSV consumer
-func CSVConsumer() Consumer {
+// CSVConsumer creates a new CSV consumer.
+//
+// The consumer consumes CSV records from a provided reader into the data passed by reference.
+//
+// CSVOpts options may be specified to alter the default CSV behavior on the reader and the writer side (e.g. separator, skip header, ...).
+// The defaults are those of the standard library's csv.Reader and csv.Writer.
+//
+// Supported output underlying types and interfaces, prioritized in this order:
+// - *csv.Writer
+// - CSVWriter (writer options are ignored)
+// - io.Writer (as raw bytes)
+// - io.ReaderFrom (as raw bytes)
+// - encoding.BinaryUnmarshaler (as raw bytes)
+// - *[][]string (as a collection of records)
+// - *[]byte (as raw bytes)
+// - *string (a raw bytes)
+//
+// The consumer prioritizes situations where buffering the input is not required.
+func CSVConsumer(opts ...CSVOpt) Consumer {
+ o := csvOptsWithDefaults(opts)
+
return ConsumerFunc(func(reader io.Reader, data interface{}) error {
if reader == nil {
return errors.New("CSVConsumer requires a reader")
}
+ if data == nil {
+ return errors.New("nil destination for CSVConsumer")
+ }
csvReader := csv.NewReader(reader)
- writer, ok := data.(io.Writer)
- if !ok {
- return errors.New("data type must be io.Writer")
+ o.applyToReader(csvReader)
+ closer := defaultCloser
+ if o.closeStream {
+ if cl, isReaderCloser := reader.(io.Closer); isReaderCloser {
+ closer = cl.Close
+ }
}
- csvWriter := csv.NewWriter(writer)
- records, err := csvReader.ReadAll()
- if err != nil {
+ defer func() {
+ _ = closer()
+ }()
+
+ switch destination := data.(type) {
+ case *csv.Writer:
+ csvWriter := destination
+ o.applyToWriter(csvWriter)
+
+ return pipeCSV(csvWriter, csvReader, o)
+
+ case CSVWriter:
+ csvWriter := destination
+ // no writer options available
+
+ return pipeCSV(csvWriter, csvReader, o)
+
+ case io.Writer:
+ csvWriter := csv.NewWriter(destination)
+ o.applyToWriter(csvWriter)
+
+ return pipeCSV(csvWriter, csvReader, o)
+
+ case io.ReaderFrom:
+ var buf bytes.Buffer
+ csvWriter := csv.NewWriter(&buf)
+ o.applyToWriter(csvWriter)
+ if err := bufferedCSV(csvWriter, csvReader, o); err != nil {
+ return err
+ }
+ _, err := destination.ReadFrom(&buf)
+
return err
- }
- for _, r := range records {
- if err := csvWriter.Write(r); err != nil {
+
+ case encoding.BinaryUnmarshaler:
+ var buf bytes.Buffer
+ csvWriter := csv.NewWriter(&buf)
+ o.applyToWriter(csvWriter)
+ if err := bufferedCSV(csvWriter, csvReader, o); err != nil {
return err
}
+
+ return destination.UnmarshalBinary(buf.Bytes())
+
+ default:
+ // support *[][]string, *[]byte, *string
+ if ptr := reflect.TypeOf(data); ptr.Kind() != reflect.Ptr {
+ return errors.New("destination must be a pointer")
+ }
+
+ v := reflect.Indirect(reflect.ValueOf(data))
+ t := v.Type()
+
+ switch {
+ case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Slice && t.Elem().Elem().Kind() == reflect.String:
+ csvWriter := &csvRecordsWriter{}
+ // writer options are ignored
+ if err := pipeCSV(csvWriter, csvReader, o); err != nil {
+ return err
+ }
+
+ v.Grow(len(csvWriter.records))
+ v.SetCap(len(csvWriter.records)) // in case Grow was unnessary, trim down the capacity
+ v.SetLen(len(csvWriter.records))
+ reflect.Copy(v, reflect.ValueOf(csvWriter.records))
+
+ return nil
+
+ case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8:
+ var buf bytes.Buffer
+ csvWriter := csv.NewWriter(&buf)
+ o.applyToWriter(csvWriter)
+ if err := bufferedCSV(csvWriter, csvReader, o); err != nil {
+ return err
+ }
+ v.SetBytes(buf.Bytes())
+
+ return nil
+
+ case t.Kind() == reflect.String:
+ var buf bytes.Buffer
+ csvWriter := csv.NewWriter(&buf)
+ o.applyToWriter(csvWriter)
+ if err := bufferedCSV(csvWriter, csvReader, o); err != nil {
+ return err
+ }
+ v.SetString(buf.String())
+
+ return nil
+
+ default:
+ return fmt.Errorf("%v (%T) is not supported by the CSVConsumer, %s",
+ data, data, "can be resolved by supporting CSVWriter/Writer/BinaryUnmarshaler interface",
+ )
+ }
}
- csvWriter.Flush()
- return nil
})
}
-// CSVProducer creates a new CSV producer
-func CSVProducer() Producer {
+// CSVProducer creates a new CSV producer.
+//
+// The producer takes input data then writes as CSV to an output writer (essentially as a pipe).
+//
+// Supported input underlying types and interfaces, prioritized in this order:
+// - *csv.Reader
+// - CSVReader (reader options are ignored)
+// - io.Reader
+// - io.WriterTo
+// - encoding.BinaryMarshaler
+// - [][]string
+// - []byte
+// - string
+//
+// The producer prioritizes situations where buffering the input is not required.
+func CSVProducer(opts ...CSVOpt) Producer {
+ o := csvOptsWithDefaults(opts)
+
return ProducerFunc(func(writer io.Writer, data interface{}) error {
if writer == nil {
return errors.New("CSVProducer requires a writer")
}
+ if data == nil {
+ return errors.New("nil data for CSVProducer")
+ }
- dataBytes, ok := data.([]byte)
- if !ok {
- return errors.New("data type must be byte array")
+ csvWriter := csv.NewWriter(writer)
+ o.applyToWriter(csvWriter)
+ closer := defaultCloser
+ if o.closeStream {
+ if cl, isWriterCloser := writer.(io.Closer); isWriterCloser {
+ closer = cl.Close
+ }
}
+ defer func() {
+ _ = closer()
+ }()
- csvReader := csv.NewReader(bytes.NewBuffer(dataBytes))
- records, err := csvReader.ReadAll()
- if err != nil {
- return err
+ if rc, isDataCloser := data.(io.ReadCloser); isDataCloser {
+ defer rc.Close()
}
- csvWriter := csv.NewWriter(writer)
- for _, r := range records {
- if err := csvWriter.Write(r); err != nil {
+
+ switch origin := data.(type) {
+ case *csv.Reader:
+ csvReader := origin
+ o.applyToReader(csvReader)
+
+ return pipeCSV(csvWriter, csvReader, o)
+
+ case CSVReader:
+ csvReader := origin
+ // no reader options available
+
+ return pipeCSV(csvWriter, csvReader, o)
+
+ case io.Reader:
+ csvReader := csv.NewReader(origin)
+ o.applyToReader(csvReader)
+
+ return pipeCSV(csvWriter, csvReader, o)
+
+ case io.WriterTo:
+ // async piping of the writes performed by WriteTo
+ r, w := io.Pipe()
+ csvReader := csv.NewReader(r)
+ o.applyToReader(csvReader)
+
+ pipe, _ := errgroup.WithContext(context.Background())
+ pipe.Go(func() error {
+ _, err := origin.WriteTo(w)
+ _ = w.Close()
+ return err
+ })
+
+ pipe.Go(func() error {
+ defer func() {
+ _ = r.Close()
+ }()
+
+ return pipeCSV(csvWriter, csvReader, o)
+ })
+
+ return pipe.Wait()
+
+ case encoding.BinaryMarshaler:
+ buf, err := origin.MarshalBinary()
+ if err != nil {
return err
}
+ rdr := bytes.NewBuffer(buf)
+ csvReader := csv.NewReader(rdr)
+
+ return bufferedCSV(csvWriter, csvReader, o)
+
+ default:
+ // support [][]string, []byte, string (or pointers to those)
+ v := reflect.Indirect(reflect.ValueOf(data))
+ t := v.Type()
+
+ switch {
+ case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Slice && t.Elem().Elem().Kind() == reflect.String:
+ csvReader := &csvRecordsWriter{
+ records: make([][]string, v.Len()),
+ }
+ reflect.Copy(reflect.ValueOf(csvReader.records), v)
+
+ return pipeCSV(csvWriter, csvReader, o)
+
+ case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8:
+ buf := bytes.NewBuffer(v.Bytes())
+ csvReader := csv.NewReader(buf)
+ o.applyToReader(csvReader)
+
+ return bufferedCSV(csvWriter, csvReader, o)
+
+ case t.Kind() == reflect.String:
+ buf := bytes.NewBufferString(v.String())
+ csvReader := csv.NewReader(buf)
+ o.applyToReader(csvReader)
+
+ return bufferedCSV(csvWriter, csvReader, o)
+
+ default:
+ return fmt.Errorf("%v (%T) is not supported by the CSVProducer, %s",
+ data, data, "can be resolved by supporting CSVReader/Reader/BinaryMarshaler interface",
+ )
+ }
}
- csvWriter.Flush()
- return nil
})
}
+
+// pipeCSV copies CSV records from a CSV reader to a CSV writer
+func pipeCSV(csvWriter CSVWriter, csvReader CSVReader, opts csvOpts) error {
+ for ; opts.skippedLines > 0; opts.skippedLines-- {
+ _, err := csvReader.Read()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ return nil
+ }
+
+ return err
+ }
+ }
+
+ for {
+ record, err := csvReader.Read()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+
+ return err
+ }
+
+ if err := csvWriter.Write(record); err != nil {
+ return err
+ }
+ }
+
+ csvWriter.Flush()
+
+ return csvWriter.Error()
+}
+
+// bufferedCSV copies CSV records from a CSV reader to a CSV writer,
+// by first reading all records then writing them at once.
+func bufferedCSV(csvWriter *csv.Writer, csvReader *csv.Reader, opts csvOpts) error {
+ for ; opts.skippedLines > 0; opts.skippedLines-- {
+ _, err := csvReader.Read()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ return nil
+ }
+
+ return err
+ }
+ }
+
+ records, err := csvReader.ReadAll()
+ if err != nil {
+ return err
+ }
+
+ return csvWriter.WriteAll(records)
+}
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/csv_options.go b/test/tools/vendor/github.com/go-openapi/runtime/csv_options.go
new file mode 100644
index 00000000000..c16464c5784
--- /dev/null
+++ b/test/tools/vendor/github.com/go-openapi/runtime/csv_options.go
@@ -0,0 +1,121 @@
+package runtime
+
+import (
+ "encoding/csv"
+ "io"
+)
+
+// CSVOpts alter the behavior of the CSV consumer or producer.
+type CSVOpt func(*csvOpts)
+
+type csvOpts struct {
+ csvReader csv.Reader
+ csvWriter csv.Writer
+ skippedLines int
+ closeStream bool
+}
+
+// WithCSVReaderOpts specifies the options to csv.Reader
+// when reading CSV.
+func WithCSVReaderOpts(reader csv.Reader) CSVOpt {
+ return func(o *csvOpts) {
+ o.csvReader = reader
+ }
+}
+
+// WithCSVWriterOpts specifies the options to csv.Writer
+// when writing CSV.
+func WithCSVWriterOpts(writer csv.Writer) CSVOpt {
+ return func(o *csvOpts) {
+ o.csvWriter = writer
+ }
+}
+
+// WithCSVSkipLines will skip header lines.
+func WithCSVSkipLines(skipped int) CSVOpt {
+ return func(o *csvOpts) {
+ o.skippedLines = skipped
+ }
+}
+
+func WithCSVClosesStream() CSVOpt {
+ return func(o *csvOpts) {
+ o.closeStream = true
+ }
+}
+
+func (o csvOpts) applyToReader(in *csv.Reader) {
+ if o.csvReader.Comma != 0 {
+ in.Comma = o.csvReader.Comma
+ }
+ if o.csvReader.Comment != 0 {
+ in.Comment = o.csvReader.Comment
+ }
+ if o.csvReader.FieldsPerRecord != 0 {
+ in.FieldsPerRecord = o.csvReader.FieldsPerRecord
+ }
+
+ in.LazyQuotes = o.csvReader.LazyQuotes
+ in.TrimLeadingSpace = o.csvReader.TrimLeadingSpace
+ in.ReuseRecord = o.csvReader.ReuseRecord
+}
+
+func (o csvOpts) applyToWriter(in *csv.Writer) {
+ if o.csvWriter.Comma != 0 {
+ in.Comma = o.csvWriter.Comma
+ }
+ in.UseCRLF = o.csvWriter.UseCRLF
+}
+
+func csvOptsWithDefaults(opts []CSVOpt) csvOpts {
+ var o csvOpts
+ for _, apply := range opts {
+ apply(&o)
+ }
+
+ return o
+}
+
+type CSVWriter interface {
+ Write([]string) error
+ Flush()
+ Error() error
+}
+
+type CSVReader interface {
+ Read() ([]string, error)
+}
+
+var (
+ _ CSVWriter = &csvRecordsWriter{}
+ _ CSVReader = &csvRecordsWriter{}
+)
+
+// csvRecordsWriter is an internal container to move CSV records back and forth
+type csvRecordsWriter struct {
+ i int
+ records [][]string
+}
+
+func (w *csvRecordsWriter) Write(record []string) error {
+ w.records = append(w.records, record)
+
+ return nil
+}
+
+func (w *csvRecordsWriter) Read() ([]string, error) {
+ if w.i >= len(w.records) {
+ return nil, io.EOF
+ }
+ defer func() {
+ w.i++
+ }()
+
+ return w.records[w.i], nil
+}
+
+func (w *csvRecordsWriter) Flush() {}
+
+func (w *csvRecordsWriter) Error() error {
+ return nil
+}
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/logger/standard.go b/test/tools/vendor/github.com/go-openapi/runtime/logger/standard.go
index f7e67ebb9e7..30035a77770 100644
--- a/test/tools/vendor/github.com/go-openapi/runtime/logger/standard.go
+++ b/test/tools/vendor/github.com/go-openapi/runtime/logger/standard.go
@@ -5,6 +5,8 @@ import (
"os"
)
+var _ Logger = StandardLogger{}
+
type StandardLogger struct{}
func (StandardLogger) Printf(format string, args ...interface{}) {
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/context.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/context.go
index d21ae4e870c..44cecf1181e 100644
--- a/test/tools/vendor/github.com/go-openapi/runtime/middleware/context.go
+++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/context.go
@@ -18,6 +18,8 @@ import (
stdContext "context"
"fmt"
"net/http"
+ "net/url"
+ "path"
"strings"
"sync"
@@ -35,12 +37,21 @@ import (
// Debug when true turns on verbose logging
var Debug = logger.DebugEnabled()
+
+// Logger is the standard libray logger used for printing debug messages
var Logger logger.Logger = logger.StandardLogger{}
-func debugLog(format string, args ...interface{}) {
- if Debug {
- Logger.Printf(format, args...)
+func debugLogfFunc(lg logger.Logger) func(string, ...any) {
+ if logger.DebugEnabled() {
+ if lg == nil {
+ return Logger.Debugf
+ }
+
+ return lg.Debugf
}
+
+ // muted logger
+ return func(_ string, _ ...any) {}
}
// A Builder can create middlewares
@@ -73,10 +84,11 @@ func (fn ResponderFunc) WriteResponse(rw http.ResponseWriter, pr runtime.Produce
// used throughout to store request context with the standard context attached
// to the http.Request
type Context struct {
- spec *loads.Document
- analyzer *analysis.Spec
- api RoutableAPI
- router Router
+ spec *loads.Document
+ analyzer *analysis.Spec
+ api RoutableAPI
+ router Router
+ debugLogf func(string, ...any) // a logging function to debug context and all components using it
}
type routableUntypedAPI struct {
@@ -162,7 +174,7 @@ func (r *routableUntypedAPI) HandlerFor(method, path string) (http.Handler, bool
r.hlock.Unlock()
return handler, ok
}
-func (r *routableUntypedAPI) ServeErrorFor(operationID string) func(http.ResponseWriter, *http.Request, error) {
+func (r *routableUntypedAPI) ServeErrorFor(_ string) func(http.ResponseWriter, *http.Request, error) {
return r.api.ServeError
}
func (r *routableUntypedAPI) ConsumersFor(mediaTypes []string) map[string]runtime.Consumer {
@@ -189,7 +201,9 @@ func (r *routableUntypedAPI) DefaultConsumes() string {
return r.defaultConsumes
}
-// NewRoutableContext creates a new context for a routable API
+// NewRoutableContext creates a new context for a routable API.
+//
+// If a nil Router is provided, the DefaultRouter (denco-based) will be used.
func NewRoutableContext(spec *loads.Document, routableAPI RoutableAPI, routes Router) *Context {
var an *analysis.Spec
if spec != nil {
@@ -199,26 +213,40 @@ func NewRoutableContext(spec *loads.Document, routableAPI RoutableAPI, routes Ro
return NewRoutableContextWithAnalyzedSpec(spec, an, routableAPI, routes)
}
-// NewRoutableContextWithAnalyzedSpec is like NewRoutableContext but takes in input the analysed spec too
+// NewRoutableContextWithAnalyzedSpec is like NewRoutableContext but takes as input an already analysed spec.
+//
+// If a nil Router is provided, the DefaultRouter (denco-based) will be used.
func NewRoutableContextWithAnalyzedSpec(spec *loads.Document, an *analysis.Spec, routableAPI RoutableAPI, routes Router) *Context {
// Either there are no spec doc and analysis, or both of them.
if !((spec == nil && an == nil) || (spec != nil && an != nil)) {
panic(errors.New(http.StatusInternalServerError, "routable context requires either both spec doc and analysis, or none of them"))
}
- ctx := &Context{spec: spec, api: routableAPI, analyzer: an, router: routes}
- return ctx
+ return &Context{
+ spec: spec,
+ api: routableAPI,
+ analyzer: an,
+ router: routes,
+ debugLogf: debugLogfFunc(nil),
+ }
}
-// NewContext creates a new context wrapper
+// NewContext creates a new context wrapper.
+//
+// If a nil Router is provided, the DefaultRouter (denco-based) will be used.
func NewContext(spec *loads.Document, api *untyped.API, routes Router) *Context {
var an *analysis.Spec
if spec != nil {
an = analysis.New(spec.Spec())
}
- ctx := &Context{spec: spec, analyzer: an}
+ ctx := &Context{
+ spec: spec,
+ analyzer: an,
+ router: routes,
+ debugLogf: debugLogfFunc(nil),
+ }
ctx.api = newRoutableUntypedAPI(spec, api, ctx)
- ctx.router = routes
+
return ctx
}
@@ -282,6 +310,13 @@ func (c *Context) BasePath() string {
return c.spec.BasePath()
}
+// SetLogger allows for injecting a logger to catch debug entries.
+//
+// The logger is enabled in DEBUG mode only.
+func (c *Context) SetLogger(lg logger.Logger) {
+ c.debugLogf = debugLogfFunc(lg)
+}
+
// RequiredProduces returns the accepted content types for responses
func (c *Context) RequiredProduces() []string {
return c.analyzer.RequiredProduces()
@@ -299,6 +334,7 @@ func (c *Context) BindValidRequest(request *http.Request, route *MatchedRoute, b
if err != nil {
res = append(res, err)
} else {
+ c.debugLogf("validating content type for %q against [%s]", ct, strings.Join(route.Consumes, ", "))
if err := validateContentType(route.Consumes, ct); err != nil {
res = append(res, err)
}
@@ -397,16 +433,16 @@ func (c *Context) ResponseFormat(r *http.Request, offers []string) (string, *htt
var rCtx = r.Context()
if v, ok := rCtx.Value(ctxResponseFormat).(string); ok {
- debugLog("[%s %s] found response format %q in context", r.Method, r.URL.Path, v)
+ c.debugLogf("[%s %s] found response format %q in context", r.Method, r.URL.Path, v)
return v, r
}
format := NegotiateContentType(r, offers, "")
if format != "" {
- debugLog("[%s %s] set response format %q in context", r.Method, r.URL.Path, format)
+ c.debugLogf("[%s %s] set response format %q in context", r.Method, r.URL.Path, format)
r = r.WithContext(stdContext.WithValue(rCtx, ctxResponseFormat, format))
}
- debugLog("[%s %s] negotiated response format %q", r.Method, r.URL.Path, format)
+ c.debugLogf("[%s %s] negotiated response format %q", r.Method, r.URL.Path, format)
return format, r
}
@@ -469,7 +505,7 @@ func (c *Context) BindAndValidate(request *http.Request, matched *MatchedRoute)
var rCtx = request.Context()
if v, ok := rCtx.Value(ctxBoundParams).(*validation); ok {
- debugLog("got cached validation (valid: %t)", len(v.result) == 0)
+ c.debugLogf("got cached validation (valid: %t)", len(v.result) == 0)
if len(v.result) > 0 {
return v.bound, request, errors.CompositeValidationError(v.result...)
}
@@ -481,7 +517,7 @@ func (c *Context) BindAndValidate(request *http.Request, matched *MatchedRoute)
if len(result.result) > 0 {
return result.bound, request, errors.CompositeValidationError(result.result...)
}
- debugLog("no validation errors found")
+ c.debugLogf("no validation errors found")
return result.bound, request, nil
}
@@ -492,7 +528,7 @@ func (c *Context) NotFound(rw http.ResponseWriter, r *http.Request) {
// Respond renders the response after doing some content negotiation
func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []string, route *MatchedRoute, data interface{}) {
- debugLog("responding to %s %s with produces: %v", r.Method, r.URL.Path, produces)
+ c.debugLogf("responding to %s %s with produces: %v", r.Method, r.URL.Path, produces)
offers := []string{}
for _, mt := range produces {
if mt != c.api.DefaultProduces() {
@@ -501,7 +537,7 @@ func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []st
}
// the default producer is last so more specific producers take precedence
offers = append(offers, c.api.DefaultProduces())
- debugLog("offers: %v", offers)
+ c.debugLogf("offers: %v", offers)
var format string
format, r = c.ResponseFormat(r, offers)
@@ -516,7 +552,7 @@ func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []st
prods := c.api.ProducersFor(normalizeOffers([]string{c.api.DefaultProduces()}))
pr, ok := prods[c.api.DefaultProduces()]
if !ok {
- panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format))
+ panic(errors.New(http.StatusInternalServerError, cantFindProducer(format)))
}
prod = pr
}
@@ -542,14 +578,14 @@ func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []st
}
if route == nil || route.Operation == nil {
- rw.WriteHeader(200)
- if r.Method == "HEAD" {
+ rw.WriteHeader(http.StatusOK)
+ if r.Method == http.MethodHead {
return
}
producers := c.api.ProducersFor(normalizeOffers(offers))
prod, ok := producers[format]
if !ok {
- panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format))
+ panic(errors.New(http.StatusInternalServerError, cantFindProducer(format)))
}
if err := prod.Produce(rw, data); err != nil {
panic(err) // let the recovery middleware deal with this
@@ -559,7 +595,7 @@ func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []st
if _, code, ok := route.Operation.SuccessResponse(); ok {
rw.WriteHeader(code)
- if code == 204 || r.Method == "HEAD" {
+ if code == http.StatusNoContent || r.Method == http.MethodHead {
return
}
@@ -570,7 +606,7 @@ func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []st
prods := c.api.ProducersFor(normalizeOffers([]string{c.api.DefaultProduces()}))
pr, ok := prods[c.api.DefaultProduces()]
if !ok {
- panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format))
+ panic(errors.New(http.StatusInternalServerError, cantFindProducer(format)))
}
prod = pr
}
@@ -584,45 +620,92 @@ func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []st
c.api.ServeErrorFor(route.Operation.ID)(rw, r, errors.New(http.StatusInternalServerError, "can't produce response"))
}
-func (c *Context) APIHandlerSwaggerUI(builder Builder) http.Handler {
+// APIHandlerSwaggerUI returns a handler to serve the API.
+//
+// This handler includes a swagger spec, router and the contract defined in the swagger spec.
+//
+// A spec UI (SwaggerUI) is served at {API base path}/docs and the spec document at /swagger.json
+// (these can be modified with uiOptions).
+func (c *Context) APIHandlerSwaggerUI(builder Builder, opts ...UIOption) http.Handler {
b := builder
if b == nil {
b = PassthroughBuilder
}
- var title string
- sp := c.spec.Spec()
- if sp != nil && sp.Info != nil && sp.Info.Title != "" {
- title = sp.Info.Title
- }
+ specPath, uiOpts, specOpts := c.uiOptionsForHandler(opts)
+ var swaggerUIOpts SwaggerUIOpts
+ fromCommonToAnyOptions(uiOpts, &swaggerUIOpts)
+
+ return Spec(specPath, c.spec.Raw(), SwaggerUI(swaggerUIOpts, c.RoutesHandler(b)), specOpts...)
+}
- swaggerUIOpts := SwaggerUIOpts{
- BasePath: c.BasePath(),
- Title: title,
+// APIHandlerRapiDoc returns a handler to serve the API.
+//
+// This handler includes a swagger spec, router and the contract defined in the swagger spec.
+//
+// A spec UI (RapiDoc) is served at {API base path}/docs and the spec document at /swagger.json
+// (these can be modified with uiOptions).
+func (c *Context) APIHandlerRapiDoc(builder Builder, opts ...UIOption) http.Handler {
+ b := builder
+ if b == nil {
+ b = PassthroughBuilder
}
- return Spec("", c.spec.Raw(), SwaggerUI(swaggerUIOpts, c.RoutesHandler(b)))
+ specPath, uiOpts, specOpts := c.uiOptionsForHandler(opts)
+ var rapidocUIOpts RapiDocOpts
+ fromCommonToAnyOptions(uiOpts, &rapidocUIOpts)
+
+ return Spec(specPath, c.spec.Raw(), RapiDoc(rapidocUIOpts, c.RoutesHandler(b)), specOpts...)
}
-// APIHandler returns a handler to serve the API, this includes a swagger spec, router and the contract defined in the swagger spec
-func (c *Context) APIHandler(builder Builder) http.Handler {
+// APIHandler returns a handler to serve the API.
+//
+// This handler includes a swagger spec, router and the contract defined in the swagger spec.
+//
+// A spec UI (Redoc) is served at {API base path}/docs and the spec document at /swagger.json
+// (these can be modified with uiOptions).
+func (c *Context) APIHandler(builder Builder, opts ...UIOption) http.Handler {
b := builder
if b == nil {
b = PassthroughBuilder
}
+ specPath, uiOpts, specOpts := c.uiOptionsForHandler(opts)
+ var redocOpts RedocOpts
+ fromCommonToAnyOptions(uiOpts, &redocOpts)
+
+ return Spec(specPath, c.spec.Raw(), Redoc(redocOpts, c.RoutesHandler(b)), specOpts...)
+}
+
+func (c Context) uiOptionsForHandler(opts []UIOption) (string, uiOptions, []SpecOption) {
var title string
sp := c.spec.Spec()
if sp != nil && sp.Info != nil && sp.Info.Title != "" {
title = sp.Info.Title
}
- redocOpts := RedocOpts{
- BasePath: c.BasePath(),
- Title: title,
+ // default options (may be overridden)
+ optsForContext := []UIOption{
+ WithUIBasePath(c.BasePath()),
+ WithUITitle(title),
+ }
+ optsForContext = append(optsForContext, opts...)
+ uiOpts := uiOptionsWithDefaults(optsForContext)
+
+ // If spec URL is provided, there is a non-default path to serve the spec.
+ // This makes sure that the UI middleware is aligned with the Spec middleware.
+ u, _ := url.Parse(uiOpts.SpecURL)
+ var specPath string
+ if u != nil {
+ specPath = u.Path
+ }
+
+ pth, doc := path.Split(specPath)
+ if pth == "." {
+ pth = ""
}
- return Spec("", c.spec.Raw(), Redoc(redocOpts, c.RoutesHandler(b)))
+ return pth, uiOpts, []SpecOption{WithSpecDocument(doc)}
}
// RoutesHandler returns a handler to serve the API, just the routes and the contract defined in the swagger spec
@@ -633,3 +716,7 @@ func (c *Context) RoutesHandler(builder Builder) http.Handler {
}
return NewRouter(c, b(NewOperationExecutor(c)))
}
+
+func cantFindProducer(format string) string {
+ return "can't find a producer for " + format
+}
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/denco/router.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/denco/router.go
index 5d2691ec369..4377f77a466 100644
--- a/test/tools/vendor/github.com/go-openapi/runtime/middleware/denco/router.go
+++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/denco/router.go
@@ -2,6 +2,7 @@
package denco
import (
+ "errors"
"fmt"
"sort"
"strings"
@@ -29,13 +30,13 @@ const (
// Router represents a URL router.
type Router struct {
+ param *doubleArray
// SizeHint expects the maximum number of path parameters in records to Build.
// SizeHint will be used to determine the capacity of the memory to allocate.
// By default, SizeHint will be determined from given records to Build.
SizeHint int
static map[string]interface{}
- param *doubleArray
}
// New returns a new Router.
@@ -51,7 +52,7 @@ func New() *Router {
// params is a slice of the Param that arranged in the order in which parameters appeared.
// e.g. when built routing path is "/path/to/:id/:name" and given path is "/path/to/1/alice". params order is [{"id": "1"}, {"name": "alice"}], not [{"name": "alice"}, {"id": "1"}].
func (rt *Router) Lookup(path string) (data interface{}, params Params, found bool) {
- if data, found := rt.static[path]; found {
+ if data, found = rt.static[path]; found {
return data, nil, true
}
if len(rt.param.node) == 1 {
@@ -71,7 +72,7 @@ func (rt *Router) Lookup(path string) (data interface{}, params Params, found bo
func (rt *Router) Build(records []Record) error {
statics, params := makeRecords(records)
if len(params) > MaxSize {
- return fmt.Errorf("denco: too many records")
+ return errors.New("denco: too many records")
}
if rt.SizeHint < 0 {
rt.SizeHint = 0
@@ -131,7 +132,8 @@ func newDoubleArray() *doubleArray {
// baseCheck contains BASE, CHECK and Extra flags.
// From the top, 22bits of BASE, 2bits of Extra flags and 8bits of CHECK.
//
-// BASE (22bit) | Extra flags (2bit) | CHECK (8bit)
+// BASE (22bit) | Extra flags (2bit) | CHECK (8bit)
+//
// |----------------------|--|--------|
// 32 10 8 0
type baseCheck uint32
@@ -196,24 +198,29 @@ func (da *doubleArray) lookup(path string, params []Param, idx int) (*node, []Pa
if next := nextIndex(da.bc[idx].Base(), TerminationCharacter); next < len(da.bc) && da.bc[next].Check() == TerminationCharacter {
return da.node[da.bc[next].Base()], params, true
}
+
BACKTRACKING:
for j := len(indices) - 1; j >= 0; j-- {
i, idx := int(indices[j]>>32), int(indices[j]&0xffffffff)
if da.bc[idx].IsSingleParam() {
- idx := nextIndex(da.bc[idx].Base(), ParamCharacter)
- if idx >= len(da.bc) {
+ nextIdx := nextIndex(da.bc[idx].Base(), ParamCharacter)
+ if nextIdx >= len(da.bc) {
break
}
+
next := NextSeparator(path, i)
- params := append(params, Param{Value: path[i:next]})
- if nd, params, found := da.lookup(path[next:], params, idx); found {
- return nd, params, true
+ nextParams := params
+ nextParams = append(nextParams, Param{Value: path[i:next]})
+ if nd, nextNextParams, found := da.lookup(path[next:], nextParams, nextIdx); found {
+ return nd, nextNextParams, true
}
}
+
if da.bc[idx].IsWildcardParam() {
- idx := nextIndex(da.bc[idx].Base(), WildcardCharacter)
- params := append(params, Param{Value: path[i:]})
- return da.node[da.bc[idx].Base()], params, true
+ nextIdx := nextIndex(da.bc[idx].Base(), WildcardCharacter)
+ nextParams := params
+ nextParams = append(nextParams, Param{Value: path[i:]})
+ return da.node[da.bc[nextIdx].Base()], nextParams, true
}
}
return nil, nil, false
@@ -325,7 +332,7 @@ func (da *doubleArray) arrange(records []*record, idx, depth int, usedBase map[i
}
base = da.findBase(siblings, idx, usedBase)
if base > MaxSize {
- return -1, nil, nil, fmt.Errorf("denco: too many elements of internal slice")
+ return -1, nil, nil, errors.New("denco: too many elements of internal slice")
}
da.setBase(idx, base)
return base, siblings, leaf, err
@@ -386,7 +393,7 @@ func makeSiblings(records []*record, depth int) (sib []sibling, leaf *record, er
case pc == c:
continue
default:
- return nil, nil, fmt.Errorf("denco: BUG: routing table hasn't been sorted")
+ return nil, nil, errors.New("denco: BUG: routing table hasn't been sorted")
}
if n > 0 {
sib[n-1].end = i
@@ -431,7 +438,7 @@ func makeRecords(srcs []Record) (statics, params []*record) {
wildcardPrefix := string(SeparatorCharacter) + string(WildcardCharacter)
restconfPrefix := string(PathParamCharacter) + string(ParamCharacter)
for _, r := range srcs {
- if strings.Contains(r.Key, paramPrefix) || strings.Contains(r.Key, wildcardPrefix) ||strings.Contains(r.Key, restconfPrefix){
+ if strings.Contains(r.Key, paramPrefix) || strings.Contains(r.Key, wildcardPrefix) || strings.Contains(r.Key, restconfPrefix) {
r.Key += termChar
params = append(params, &record{Record: r})
} else {
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/doc.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/doc.go
index eaf90606ac3..836a98850d7 100644
--- a/test/tools/vendor/github.com/go-openapi/runtime/middleware/doc.go
+++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/doc.go
@@ -12,51 +12,52 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-/*Package middleware provides the library with helper functions for serving swagger APIs.
+/*
+Package middleware provides the library with helper functions for serving swagger APIs.
Pseudo middleware handler
- import (
- "net/http"
-
- "github.com/go-openapi/errors"
- )
-
- func newCompleteMiddleware(ctx *Context) http.Handler {
- return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
- // use context to lookup routes
- if matched, ok := ctx.RouteInfo(r); ok {
-
- if matched.NeedsAuth() {
- if _, err := ctx.Authorize(r, matched); err != nil {
- ctx.Respond(rw, r, matched.Produces, matched, err)
- return
- }
- }
-
- bound, validation := ctx.BindAndValidate(r, matched)
- if validation != nil {
- ctx.Respond(rw, r, matched.Produces, matched, validation)
- return
- }
-
- result, err := matched.Handler.Handle(bound)
- if err != nil {
- ctx.Respond(rw, r, matched.Produces, matched, err)
- return
- }
-
- ctx.Respond(rw, r, matched.Produces, matched, result)
- return
- }
-
- // Not found, check if it exists in the other methods first
- if others := ctx.AllowedMethods(r); len(others) > 0 {
- ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others))
- return
- }
- ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.Path))
- })
- }
+ import (
+ "net/http"
+
+ "github.com/go-openapi/errors"
+ )
+
+ func newCompleteMiddleware(ctx *Context) http.Handler {
+ return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+ // use context to lookup routes
+ if matched, ok := ctx.RouteInfo(r); ok {
+
+ if matched.NeedsAuth() {
+ if _, err := ctx.Authorize(r, matched); err != nil {
+ ctx.Respond(rw, r, matched.Produces, matched, err)
+ return
+ }
+ }
+
+ bound, validation := ctx.BindAndValidate(r, matched)
+ if validation != nil {
+ ctx.Respond(rw, r, matched.Produces, matched, validation)
+ return
+ }
+
+ result, err := matched.Handler.Handle(bound)
+ if err != nil {
+ ctx.Respond(rw, r, matched.Produces, matched, err)
+ return
+ }
+
+ ctx.Respond(rw, r, matched.Produces, matched, result)
+ return
+ }
+
+ // Not found, check if it exists in the other methods first
+ if others := ctx.AllowedMethods(r); len(others) > 0 {
+ ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others))
+ return
+ }
+ ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.Path))
+ })
+ }
*/
package middleware
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/go18.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/go18.go
deleted file mode 100644
index 75c762c0948..00000000000
--- a/test/tools/vendor/github.com/go-openapi/runtime/middleware/go18.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build go1.8
-
-package middleware
-
-import "net/url"
-
-func pathUnescape(path string) (string, error) {
- return url.PathUnescape(path)
-}
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/header/header.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/header/header.go
index e069743e30a..df073c87d98 100644
--- a/test/tools/vendor/github.com/go-openapi/runtime/middleware/header/header.go
+++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/header/header.go
@@ -195,7 +195,8 @@ func ParseAccept2(header http.Header, key string) (specs []AcceptSpec) {
}
// ParseAccept parses Accept* headers.
-func ParseAccept(header http.Header, key string) (specs []AcceptSpec) {
+func ParseAccept(header http.Header, key string) []AcceptSpec {
+ var specs []AcceptSpec
loop:
for _, s := range header[key] {
for {
@@ -218,6 +219,7 @@ loop:
}
}
}
+
specs = append(specs, spec)
s = skipSpace(s)
if !strings.HasPrefix(s, ",") {
@@ -226,7 +228,8 @@ loop:
s = skipSpace(s[1:])
}
}
- return
+
+ return specs
}
func skipSpace(s string) (rest string) {
@@ -306,7 +309,7 @@ func expectTokenOrQuoted(s string) (value string, rest string) {
p := make([]byte, len(s)-1)
j := copy(p, s[:i])
escape := true
- for i = i + 1; i < len(s); i++ {
+ for i++; i < len(s); i++ {
b := s[i]
switch {
case escape:
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/parameter.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/parameter.go
index 9aaf65958ad..9c3353a95c9 100644
--- a/test/tools/vendor/github.com/go-openapi/runtime/middleware/parameter.go
+++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/parameter.go
@@ -34,6 +34,11 @@ import (
const defaultMaxMemory = 32 << 20
+const (
+ typeString = "string"
+ typeArray = "array"
+)
+
var textUnmarshalType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem()
func newUntypedParamBinder(param spec.Parameter, spec *spec.Swagger, formats strfmt.Registry) *untypedParamBinder {
@@ -66,7 +71,7 @@ func (p *untypedParamBinder) typeForSchema(tpe, format string, items *spec.Items
case "boolean":
return reflect.TypeOf(true)
- case "string":
+ case typeString:
if tt, ok := p.formats.GetType(format); ok {
return tt
}
@@ -94,7 +99,7 @@ func (p *untypedParamBinder) typeForSchema(tpe, format string, items *spec.Items
return reflect.TypeOf(float64(0))
}
- case "array":
+ case typeArray:
if items == nil {
return nil
}
@@ -119,7 +124,7 @@ func (p *untypedParamBinder) allowsMulti() bool {
func (p *untypedParamBinder) readValue(values runtime.Gettable, target reflect.Value) ([]string, bool, bool, error) {
name, in, cf, tpe := p.parameter.Name, p.parameter.In, p.parameter.CollectionFormat, p.parameter.Type
- if tpe == "array" {
+ if tpe == typeArray {
if cf == "multi" {
if !p.allowsMulti() {
return nil, false, false, errors.InvalidCollectionFormat(name, in, cf)
@@ -208,10 +213,11 @@ func (p *untypedParamBinder) Bind(request *http.Request, routeParams RouteParams
if ffErr != nil {
if p.parameter.Required {
return errors.NewParseError(p.Name, p.parameter.In, "", ffErr)
- } else {
- return nil
}
+
+ return nil
}
+
target.Set(reflect.ValueOf(runtime.File{Data: file, Header: header}))
return nil
}
@@ -263,7 +269,7 @@ func (p *untypedParamBinder) Bind(request *http.Request, routeParams RouteParams
}
func (p *untypedParamBinder) bindValue(data []string, hasKey bool, target reflect.Value) error {
- if p.parameter.Type == "array" {
+ if p.parameter.Type == typeArray {
return p.setSliceFieldValue(target, p.parameter.Default, data, hasKey)
}
var d string
@@ -273,7 +279,7 @@ func (p *untypedParamBinder) bindValue(data []string, hasKey bool, target reflec
return p.setFieldValue(target, p.parameter.Default, d, hasKey)
}
-func (p *untypedParamBinder) setFieldValue(target reflect.Value, defaultValue interface{}, data string, hasKey bool) error {
+func (p *untypedParamBinder) setFieldValue(target reflect.Value, defaultValue interface{}, data string, hasKey bool) error { //nolint:gocyclo
tpe := p.parameter.Type
if p.parameter.Format != "" {
tpe = p.parameter.Format
@@ -317,7 +323,7 @@ func (p *untypedParamBinder) setFieldValue(target reflect.Value, defaultValue in
return nil
}
- switch target.Kind() {
+ switch target.Kind() { //nolint:exhaustive // we want to check only types that map from a swagger parameter
case reflect.Bool:
if data == "" {
if target.CanSet() {
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/pre_go18.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/pre_go18.go
deleted file mode 100644
index 03385251e19..00000000000
--- a/test/tools/vendor/github.com/go-openapi/runtime/middleware/pre_go18.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build !go1.8
-
-package middleware
-
-import "net/url"
-
-func pathUnescape(path string) (string, error) {
- return url.QueryUnescape(path)
-}
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go
index 4be330d6dc3..ef75e7441fc 100644
--- a/test/tools/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go
+++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go
@@ -10,67 +10,57 @@ import (
// RapiDocOpts configures the RapiDoc middlewares
type RapiDocOpts struct {
- // BasePath for the UI path, defaults to: /
+ // BasePath for the UI, defaults to: /
BasePath string
- // Path combines with BasePath for the full UI path, defaults to: docs
+
+ // Path combines with BasePath to construct the path to the UI, defaults to: "docs".
Path string
- // SpecURL the url to find the spec for
+
+ // SpecURL is the URL of the spec document.
+ //
+ // Defaults to: /swagger.json
SpecURL string
- // RapiDocURL for the js that generates the rapidoc site, defaults to: https://cdn.jsdelivr.net/npm/rapidoc/bundles/rapidoc.standalone.js
- RapiDocURL string
+
// Title for the documentation site, default to: API documentation
Title string
+
+ // Template specifies a custom template to serve the UI
+ Template string
+
+ // RapiDocURL points to the js asset that generates the rapidoc site.
+ //
+ // Defaults to https://unpkg.com/rapidoc/dist/rapidoc-min.js
+ RapiDocURL string
}
-// EnsureDefaults in case some options are missing
func (r *RapiDocOpts) EnsureDefaults() {
- if r.BasePath == "" {
- r.BasePath = "/"
- }
- if r.Path == "" {
- r.Path = "docs"
- }
- if r.SpecURL == "" {
- r.SpecURL = "/swagger.json"
- }
+ common := toCommonUIOptions(r)
+ common.EnsureDefaults()
+ fromCommonToAnyOptions(common, r)
+
+ // rapidoc-specifics
if r.RapiDocURL == "" {
r.RapiDocURL = rapidocLatest
}
- if r.Title == "" {
- r.Title = "API documentation"
+ if r.Template == "" {
+ r.Template = rapidocTemplate
}
}
// RapiDoc creates a middleware to serve a documentation site for a swagger spec.
-// This allows for altering the spec before starting the http listener.
//
+// This allows for altering the spec before starting the http listener.
func RapiDoc(opts RapiDocOpts, next http.Handler) http.Handler {
opts.EnsureDefaults()
pth := path.Join(opts.BasePath, opts.Path)
- tmpl := template.Must(template.New("rapidoc").Parse(rapidocTemplate))
-
- buf := bytes.NewBuffer(nil)
- _ = tmpl.Execute(buf, opts)
- b := buf.Bytes()
-
- return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
- if r.URL.Path == pth {
- rw.Header().Set("Content-Type", "text/html; charset=utf-8")
- rw.WriteHeader(http.StatusOK)
-
- _, _ = rw.Write(b)
- return
- }
+ tmpl := template.Must(template.New("rapidoc").Parse(opts.Template))
+ assets := bytes.NewBuffer(nil)
+ if err := tmpl.Execute(assets, opts); err != nil {
+ panic(fmt.Errorf("cannot execute template: %w", err))
+ }
- if next == nil {
- rw.Header().Set("Content-Type", "text/plain")
- rw.WriteHeader(http.StatusNotFound)
- _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth)))
- return
- }
- next.ServeHTTP(rw, r)
- })
+ return serveUI(pth, assets.Bytes(), next)
}
const (
@@ -79,7 +69,7 @@ const (
{{ .Title }}
-
+
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/redoc.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/redoc.go
index 019c854295b..b96b01e7f3f 100644
--- a/test/tools/vendor/github.com/go-openapi/runtime/middleware/redoc.go
+++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/redoc.go
@@ -10,67 +10,58 @@ import (
// RedocOpts configures the Redoc middlewares
type RedocOpts struct {
- // BasePath for the UI path, defaults to: /
+ // BasePath for the UI, defaults to: /
BasePath string
- // Path combines with BasePath for the full UI path, defaults to: docs
+
+ // Path combines with BasePath to construct the path to the UI, defaults to: "docs".
Path string
- // SpecURL the url to find the spec for
+
+ // SpecURL is the URL of the spec document.
+ //
+ // Defaults to: /swagger.json
SpecURL string
- // RedocURL for the js that generates the redoc site, defaults to: https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js
- RedocURL string
+
// Title for the documentation site, default to: API documentation
Title string
+
+ // Template specifies a custom template to serve the UI
+ Template string
+
+ // RedocURL points to the js that generates the redoc site.
+ //
+ // Defaults to: https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js
+ RedocURL string
}
// EnsureDefaults in case some options are missing
func (r *RedocOpts) EnsureDefaults() {
- if r.BasePath == "" {
- r.BasePath = "/"
- }
- if r.Path == "" {
- r.Path = "docs"
- }
- if r.SpecURL == "" {
- r.SpecURL = "/swagger.json"
- }
+ common := toCommonUIOptions(r)
+ common.EnsureDefaults()
+ fromCommonToAnyOptions(common, r)
+
+ // redoc-specifics
if r.RedocURL == "" {
r.RedocURL = redocLatest
}
- if r.Title == "" {
- r.Title = "API documentation"
+ if r.Template == "" {
+ r.Template = redocTemplate
}
}
// Redoc creates a middleware to serve a documentation site for a swagger spec.
-// This allows for altering the spec before starting the http listener.
//
+// This allows for altering the spec before starting the http listener.
func Redoc(opts RedocOpts, next http.Handler) http.Handler {
opts.EnsureDefaults()
pth := path.Join(opts.BasePath, opts.Path)
- tmpl := template.Must(template.New("redoc").Parse(redocTemplate))
-
- buf := bytes.NewBuffer(nil)
- _ = tmpl.Execute(buf, opts)
- b := buf.Bytes()
-
- return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
- if r.URL.Path == pth {
- rw.Header().Set("Content-Type", "text/html; charset=utf-8")
- rw.WriteHeader(http.StatusOK)
-
- _, _ = rw.Write(b)
- return
- }
+ tmpl := template.Must(template.New("redoc").Parse(opts.Template))
+ assets := bytes.NewBuffer(nil)
+ if err := tmpl.Execute(assets, opts); err != nil {
+ panic(fmt.Errorf("cannot execute template: %w", err))
+ }
- if next == nil {
- rw.Header().Set("Content-Type", "text/plain")
- rw.WriteHeader(http.StatusNotFound)
- _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth)))
- return
- }
- next.ServeHTTP(rw, r)
- })
+ return serveUI(pth, assets.Bytes(), next)
}
const (
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/request.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/request.go
index 760c37861d0..82e14366523 100644
--- a/test/tools/vendor/github.com/go-openapi/runtime/middleware/request.go
+++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/request.go
@@ -19,10 +19,10 @@ import (
"reflect"
"github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/runtime/logger"
"github.com/go-openapi/spec"
"github.com/go-openapi/strfmt"
-
- "github.com/go-openapi/runtime"
)
// UntypedRequestBinder binds and validates the data from a http request
@@ -31,6 +31,7 @@ type UntypedRequestBinder struct {
Parameters map[string]spec.Parameter
Formats strfmt.Registry
paramBinders map[string]*untypedParamBinder
+ debugLogf func(string, ...any) // a logging function to debug context and all components using it
}
// NewUntypedRequestBinder creates a new binder for reading a request.
@@ -44,6 +45,7 @@ func NewUntypedRequestBinder(parameters map[string]spec.Parameter, spec *spec.Sw
paramBinders: binders,
Spec: spec,
Formats: formats,
+ debugLogf: debugLogfFunc(nil),
}
}
@@ -52,10 +54,10 @@ func (o *UntypedRequestBinder) Bind(request *http.Request, routeParams RoutePara
val := reflect.Indirect(reflect.ValueOf(data))
isMap := val.Kind() == reflect.Map
var result []error
- debugLog("binding %d parameters for %s %s", len(o.Parameters), request.Method, request.URL.EscapedPath())
+ o.debugLogf("binding %d parameters for %s %s", len(o.Parameters), request.Method, request.URL.EscapedPath())
for fieldName, param := range o.Parameters {
binder := o.paramBinders[fieldName]
- debugLog("binding parameter %s for %s %s", fieldName, request.Method, request.URL.EscapedPath())
+ o.debugLogf("binding parameter %s for %s %s", fieldName, request.Method, request.URL.EscapedPath())
var target reflect.Value
if !isMap {
binder.Name = fieldName
@@ -65,7 +67,7 @@ func (o *UntypedRequestBinder) Bind(request *http.Request, routeParams RoutePara
if isMap {
tpe := binder.Type()
if tpe == nil {
- if param.Schema.Type.Contains("array") {
+ if param.Schema.Type.Contains(typeArray) {
tpe = reflect.TypeOf([]interface{}{})
} else {
tpe = reflect.TypeOf(map[string]interface{}{})
@@ -102,3 +104,14 @@ func (o *UntypedRequestBinder) Bind(request *http.Request, routeParams RoutePara
return nil
}
+
+// SetLogger allows for injecting a logger to catch debug entries.
+//
+// The logger is enabled in DEBUG mode only.
+func (o *UntypedRequestBinder) SetLogger(lg logger.Logger) {
+ o.debugLogf = debugLogfFunc(lg)
+}
+
+func (o *UntypedRequestBinder) setDebugLogf(fn func(string, ...any)) {
+ o.debugLogf = fn
+}
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/router.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/router.go
index 5052031c8d7..3a6aee90e50 100644
--- a/test/tools/vendor/github.com/go-openapi/runtime/middleware/router.go
+++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/router.go
@@ -17,10 +17,12 @@ package middleware
import (
"fmt"
"net/http"
+ "net/url"
fpath "path"
"regexp"
"strings"
+ "github.com/go-openapi/runtime/logger"
"github.com/go-openapi/runtime/security"
"github.com/go-openapi/swag"
@@ -67,10 +69,10 @@ func (r RouteParams) GetOK(name string) ([]string, bool, bool) {
return nil, false, false
}
-// NewRouter creates a new context aware router middleware
+// NewRouter creates a new context-aware router middleware
func NewRouter(ctx *Context, next http.Handler) http.Handler {
if ctx.router == nil {
- ctx.router = DefaultRouter(ctx.spec, ctx.api)
+ ctx.router = DefaultRouter(ctx.spec, ctx.api, WithDefaultRouterLoggerFunc(ctx.debugLogf))
}
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
@@ -103,41 +105,75 @@ type RoutableAPI interface {
DefaultConsumes() string
}
-// Router represents a swagger aware router
+// Router represents a swagger-aware router
type Router interface {
Lookup(method, path string) (*MatchedRoute, bool)
OtherMethods(method, path string) []string
}
type defaultRouteBuilder struct {
- spec *loads.Document
- analyzer *analysis.Spec
- api RoutableAPI
- records map[string][]denco.Record
+ spec *loads.Document
+ analyzer *analysis.Spec
+ api RoutableAPI
+ records map[string][]denco.Record
+ debugLogf func(string, ...any) // a logging function to debug context and all components using it
}
type defaultRouter struct {
- spec *loads.Document
- routers map[string]*denco.Router
+ spec *loads.Document
+ routers map[string]*denco.Router
+ debugLogf func(string, ...any) // a logging function to debug context and all components using it
}
-func newDefaultRouteBuilder(spec *loads.Document, api RoutableAPI) *defaultRouteBuilder {
+func newDefaultRouteBuilder(spec *loads.Document, api RoutableAPI, opts ...DefaultRouterOpt) *defaultRouteBuilder {
+ var o defaultRouterOpts
+ for _, apply := range opts {
+ apply(&o)
+ }
+ if o.debugLogf == nil {
+ o.debugLogf = debugLogfFunc(nil) // defaults to standard logger
+ }
+
return &defaultRouteBuilder{
- spec: spec,
- analyzer: analysis.New(spec.Spec()),
- api: api,
- records: make(map[string][]denco.Record),
+ spec: spec,
+ analyzer: analysis.New(spec.Spec()),
+ api: api,
+ records: make(map[string][]denco.Record),
+ debugLogf: o.debugLogf,
}
}
-// DefaultRouter creates a default implemenation of the router
-func DefaultRouter(spec *loads.Document, api RoutableAPI) Router {
- builder := newDefaultRouteBuilder(spec, api)
+// DefaultRouterOpt allows to inject optional behavior to the default router.
+type DefaultRouterOpt func(*defaultRouterOpts)
+
+type defaultRouterOpts struct {
+ debugLogf func(string, ...any)
+}
+
+// WithDefaultRouterLogger sets the debug logger for the default router.
+//
+// This is enabled only in DEBUG mode.
+func WithDefaultRouterLogger(lg logger.Logger) DefaultRouterOpt {
+ return func(o *defaultRouterOpts) {
+ o.debugLogf = debugLogfFunc(lg)
+ }
+}
+
+// WithDefaultRouterLoggerFunc sets a logging debug method for the default router.
+func WithDefaultRouterLoggerFunc(fn func(string, ...any)) DefaultRouterOpt {
+ return func(o *defaultRouterOpts) {
+ o.debugLogf = fn
+ }
+}
+
+// DefaultRouter creates a default implementation of the router
+func DefaultRouter(spec *loads.Document, api RoutableAPI, opts ...DefaultRouterOpt) Router {
+ builder := newDefaultRouteBuilder(spec, api, opts...)
if spec != nil {
for method, paths := range builder.analyzer.Operations() {
for path, operation := range paths {
fp := fpath.Join(spec.BasePath(), path)
- debugLog("adding route %s %s %q", method, fp, operation.ID)
+ builder.debugLogf("adding route %s %s %q", method, fp, operation.ID)
builder.AddRoute(method, fp, operation)
}
}
@@ -319,24 +355,24 @@ func (m *MatchedRoute) NeedsAuth() bool {
func (d *defaultRouter) Lookup(method, path string) (*MatchedRoute, bool) {
mth := strings.ToUpper(method)
- debugLog("looking up route for %s %s", method, path)
+ d.debugLogf("looking up route for %s %s", method, path)
if Debug {
if len(d.routers) == 0 {
- debugLog("there are no known routers")
+ d.debugLogf("there are no known routers")
}
for meth := range d.routers {
- debugLog("got a router for %s", meth)
+ d.debugLogf("got a router for %s", meth)
}
}
if router, ok := d.routers[mth]; ok {
if m, rp, ok := router.Lookup(fpath.Clean(path)); ok && m != nil {
if entry, ok := m.(*routeEntry); ok {
- debugLog("found a route for %s %s with %d parameters", method, path, len(entry.Parameters))
+ d.debugLogf("found a route for %s %s with %d parameters", method, path, len(entry.Parameters))
var params RouteParams
for _, p := range rp {
- v, err := pathUnescape(p.Value)
+ v, err := url.PathUnescape(p.Value)
if err != nil {
- debugLog("failed to escape %q: %v", p.Value, err)
+ d.debugLogf("failed to escape %q: %v", p.Value, err)
v = p.Value
}
// a workaround to handle fragment/composing parameters until they are supported in denco router
@@ -356,10 +392,10 @@ func (d *defaultRouter) Lookup(method, path string) (*MatchedRoute, bool) {
return &MatchedRoute{routeEntry: *entry, Params: params}, true
}
} else {
- debugLog("couldn't find a route by path for %s %s", method, path)
+ d.debugLogf("couldn't find a route by path for %s %s", method, path)
}
} else {
- debugLog("couldn't find a route by method for %s %s", method, path)
+ d.debugLogf("couldn't find a route by method for %s %s", method, path)
}
return nil, false
}
@@ -378,6 +414,10 @@ func (d *defaultRouter) OtherMethods(method, path string) []string {
return methods
}
+func (d *defaultRouter) SetLogger(lg logger.Logger) {
+ d.debugLogf = debugLogfFunc(lg)
+}
+
// convert swagger parameters per path segment into a denco parameter as multiple parameters per segment are not supported in denco
var pathConverter = regexp.MustCompile(`{(.+?)}([^/]*)`)
@@ -413,7 +453,7 @@ func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Oper
bp = bp[:len(bp)-1]
}
- debugLog("operation: %#v", *operation)
+ d.debugLogf("operation: %#v", *operation)
if handler, ok := d.api.HandlerFor(method, strings.TrimPrefix(path, bp)); ok {
consumes := d.analyzer.ConsumesFor(operation)
produces := d.analyzer.ProducesFor(operation)
@@ -428,6 +468,8 @@ func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Oper
produces = append(produces, defProduces)
}
+ requestBinder := NewUntypedRequestBinder(parameters, d.spec.Spec(), d.api.Formats())
+ requestBinder.setDebugLogf(d.debugLogf)
record := denco.NewRecord(pathConverter.ReplaceAllString(path, ":$1"), &routeEntry{
BasePath: bp,
PathPattern: path,
@@ -439,7 +481,7 @@ func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Oper
Producers: d.api.ProducersFor(normalizeOffers(produces)),
Parameters: parameters,
Formats: d.api.Formats(),
- Binder: NewUntypedRequestBinder(parameters, d.spec.Spec(), d.api.Formats()),
+ Binder: requestBinder,
Authenticators: d.buildAuthenticators(operation),
Authorizer: d.api.Authorizer(),
})
@@ -449,11 +491,11 @@ func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Oper
func (d *defaultRouteBuilder) buildAuthenticators(operation *spec.Operation) RouteAuthenticators {
requirements := d.analyzer.SecurityRequirementsFor(operation)
- var auths []RouteAuthenticator
+ auths := make([]RouteAuthenticator, 0, len(requirements))
for _, reqs := range requirements {
- var schemes []string
+ schemes := make([]string, 0, len(reqs))
scopes := make(map[string][]string, len(reqs))
- var scopeSlices [][]string
+ scopeSlices := make([][]string, 0, len(reqs))
for _, req := range reqs {
schemes = append(schemes, req.Name)
scopes[req.Name] = req.Scopes
@@ -482,7 +524,8 @@ func (d *defaultRouteBuilder) Build() *defaultRouter {
routers[method] = router
}
return &defaultRouter{
- spec: d.spec,
- routers: routers,
+ spec: d.spec,
+ routers: routers,
+ debugLogf: d.debugLogf,
}
}
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/spec.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/spec.go
index f0291429806..87e17e34249 100644
--- a/test/tools/vendor/github.com/go-openapi/runtime/middleware/spec.go
+++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/spec.go
@@ -19,30 +19,84 @@ import (
"path"
)
-// Spec creates a middleware to serve a swagger spec.
+const (
+ contentTypeHeader = "Content-Type"
+ applicationJSON = "application/json"
+)
+
+// SpecOption can be applied to the Spec serving middleware
+type SpecOption func(*specOptions)
+
+var defaultSpecOptions = specOptions{
+ Path: "",
+ Document: "swagger.json",
+}
+
+type specOptions struct {
+ Path string
+ Document string
+}
+
+func specOptionsWithDefaults(opts []SpecOption) specOptions {
+ o := defaultSpecOptions
+ for _, apply := range opts {
+ apply(&o)
+ }
+
+ return o
+}
+
+// Spec creates a middleware to serve a swagger spec as a JSON document.
+//
// This allows for altering the spec before starting the http listener.
-// This can be useful if you want to serve the swagger spec from another path than /swagger.json
//
-func Spec(basePath string, b []byte, next http.Handler) http.Handler {
+// The basePath argument indicates the path of the spec document (defaults to "/").
+// Additional SpecOption can be used to change the name of the document (defaults to "swagger.json").
+func Spec(basePath string, b []byte, next http.Handler, opts ...SpecOption) http.Handler {
if basePath == "" {
basePath = "/"
}
- pth := path.Join(basePath, "swagger.json")
+ o := specOptionsWithDefaults(opts)
+ pth := path.Join(basePath, o.Path, o.Document)
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
- if r.URL.Path == pth {
- rw.Header().Set("Content-Type", "application/json")
+ if path.Clean(r.URL.Path) == pth {
+ rw.Header().Set(contentTypeHeader, applicationJSON)
rw.WriteHeader(http.StatusOK)
- //#nosec
_, _ = rw.Write(b)
+
return
}
- if next == nil {
- rw.Header().Set("Content-Type", "application/json")
- rw.WriteHeader(http.StatusNotFound)
+ if next != nil {
+ next.ServeHTTP(rw, r)
+
return
}
- next.ServeHTTP(rw, r)
+
+ rw.Header().Set(contentTypeHeader, applicationJSON)
+ rw.WriteHeader(http.StatusNotFound)
})
}
+
+// WithSpecPath sets the path to be joined to the base path of the Spec middleware.
+//
+// This is empty by default.
+func WithSpecPath(pth string) SpecOption {
+ return func(o *specOptions) {
+ o.Path = pth
+ }
+}
+
+// WithSpecDocument sets the name of the JSON document served as a spec.
+//
+// By default, this is "swagger.json"
+func WithSpecDocument(doc string) SpecOption {
+ return func(o *specOptions) {
+ if doc == "" {
+ return
+ }
+
+ o.Document = doc
+ }
+}
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go
index b4dea29e4bc..ec3c10cbafa 100644
--- a/test/tools/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go
+++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go
@@ -8,40 +8,65 @@ import (
"path"
)
-// SwaggerUIOpts configures the Swaggerui middlewares
+// SwaggerUIOpts configures the SwaggerUI middleware
type SwaggerUIOpts struct {
- // BasePath for the UI path, defaults to: /
+ // BasePath for the API, defaults to: /
BasePath string
- // Path combines with BasePath for the full UI path, defaults to: docs
+
+ // Path combines with BasePath to construct the path to the UI, defaults to: "docs".
Path string
- // SpecURL the url to find the spec for
+
+ // SpecURL is the URL of the spec document.
+ //
+ // Defaults to: /swagger.json
SpecURL string
+
+ // Title for the documentation site, default to: API documentation
+ Title string
+
+ // Template specifies a custom template to serve the UI
+ Template string
+
// OAuthCallbackURL the url called after OAuth2 login
OAuthCallbackURL string
// The three components needed to embed swagger-ui
- SwaggerURL string
+
+ // SwaggerURL points to the js that generates the SwaggerUI site.
+ //
+ // Defaults to: https://unpkg.com/swagger-ui-dist/swagger-ui-bundle.js
+ SwaggerURL string
+
SwaggerPresetURL string
SwaggerStylesURL string
Favicon32 string
Favicon16 string
-
- // Title for the documentation site, default to: API documentation
- Title string
}
// EnsureDefaults in case some options are missing
func (r *SwaggerUIOpts) EnsureDefaults() {
- if r.BasePath == "" {
- r.BasePath = "/"
- }
- if r.Path == "" {
- r.Path = "docs"
+ r.ensureDefaults()
+
+ if r.Template == "" {
+ r.Template = swaggeruiTemplate
}
- if r.SpecURL == "" {
- r.SpecURL = "/swagger.json"
+}
+
+func (r *SwaggerUIOpts) EnsureDefaultsOauth2() {
+ r.ensureDefaults()
+
+ if r.Template == "" {
+ r.Template = swaggerOAuthTemplate
}
+}
+
+func (r *SwaggerUIOpts) ensureDefaults() {
+ common := toCommonUIOptions(r)
+ common.EnsureDefaults()
+ fromCommonToAnyOptions(common, r)
+
+ // swaggerui-specifics
if r.OAuthCallbackURL == "" {
r.OAuthCallbackURL = path.Join(r.BasePath, r.Path, "oauth2-callback")
}
@@ -60,40 +85,22 @@ func (r *SwaggerUIOpts) EnsureDefaults() {
if r.Favicon32 == "" {
r.Favicon32 = swaggerFavicon32Latest
}
- if r.Title == "" {
- r.Title = "API documentation"
- }
}
// SwaggerUI creates a middleware to serve a documentation site for a swagger spec.
+//
// This allows for altering the spec before starting the http listener.
func SwaggerUI(opts SwaggerUIOpts, next http.Handler) http.Handler {
opts.EnsureDefaults()
pth := path.Join(opts.BasePath, opts.Path)
- tmpl := template.Must(template.New("swaggerui").Parse(swaggeruiTemplate))
-
- buf := bytes.NewBuffer(nil)
- _ = tmpl.Execute(buf, &opts)
- b := buf.Bytes()
-
- return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
- if path.Join(r.URL.Path) == pth {
- rw.Header().Set("Content-Type", "text/html; charset=utf-8")
- rw.WriteHeader(http.StatusOK)
-
- _, _ = rw.Write(b)
- return
- }
-
- if next == nil {
- rw.Header().Set("Content-Type", "text/plain")
- rw.WriteHeader(http.StatusNotFound)
- _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth)))
- return
- }
- next.ServeHTTP(rw, r)
- })
+ tmpl := template.Must(template.New("swaggerui").Parse(opts.Template))
+ assets := bytes.NewBuffer(nil)
+ if err := tmpl.Execute(assets, opts); err != nil {
+ panic(fmt.Errorf("cannot execute template: %w", err))
+ }
+
+ return serveUI(pth, assets.Bytes(), next)
}
const (
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go
index 576f6003f7b..e81212f71c8 100644
--- a/test/tools/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go
+++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go
@@ -4,37 +4,20 @@ import (
"bytes"
"fmt"
"net/http"
- "path"
"text/template"
)
func SwaggerUIOAuth2Callback(opts SwaggerUIOpts, next http.Handler) http.Handler {
- opts.EnsureDefaults()
+ opts.EnsureDefaultsOauth2()
pth := opts.OAuthCallbackURL
- tmpl := template.Must(template.New("swaggeroauth").Parse(swaggerOAuthTemplate))
+ tmpl := template.Must(template.New("swaggeroauth").Parse(opts.Template))
+ assets := bytes.NewBuffer(nil)
+ if err := tmpl.Execute(assets, opts); err != nil {
+ panic(fmt.Errorf("cannot execute template: %w", err))
+ }
- buf := bytes.NewBuffer(nil)
- _ = tmpl.Execute(buf, &opts)
- b := buf.Bytes()
-
- return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
- if path.Join(r.URL.Path) == pth {
- rw.Header().Set("Content-Type", "text/html; charset=utf-8")
- rw.WriteHeader(http.StatusOK)
-
- _, _ = rw.Write(b)
- return
- }
-
- if next == nil {
- rw.Header().Set("Content-Type", "text/plain")
- rw.WriteHeader(http.StatusNotFound)
- _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth)))
- return
- }
- next.ServeHTTP(rw, r)
- })
+ return serveUI(pth, assets.Bytes(), next)
}
const (
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/ui_options.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/ui_options.go
new file mode 100644
index 00000000000..b86efa0089e
--- /dev/null
+++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/ui_options.go
@@ -0,0 +1,173 @@
+package middleware
+
+import (
+ "bytes"
+ "encoding/gob"
+ "fmt"
+ "net/http"
+ "path"
+ "strings"
+)
+
+const (
+ // constants that are common to all UI-serving middlewares
+ defaultDocsPath = "docs"
+ defaultDocsURL = "/swagger.json"
+ defaultDocsTitle = "API Documentation"
+)
+
+// uiOptions defines common options for UI serving middlewares.
+type uiOptions struct {
+ // BasePath for the UI, defaults to: /
+ BasePath string
+
+ // Path combines with BasePath to construct the path to the UI, defaults to: "docs".
+ Path string
+
+ // SpecURL is the URL of the spec document.
+ //
+ // Defaults to: /swagger.json
+ SpecURL string
+
+ // Title for the documentation site, default to: API documentation
+ Title string
+
+ // Template specifies a custom template to serve the UI
+ Template string
+}
+
+// toCommonUIOptions converts any UI option type to retain the common options.
+//
+// This uses gob encoding/decoding to convert common fields from one struct to another.
+func toCommonUIOptions(opts interface{}) uiOptions {
+ var buf bytes.Buffer
+ enc := gob.NewEncoder(&buf)
+ dec := gob.NewDecoder(&buf)
+ var o uiOptions
+ err := enc.Encode(opts)
+ if err != nil {
+ panic(err)
+ }
+
+ err = dec.Decode(&o)
+ if err != nil {
+ panic(err)
+ }
+
+ return o
+}
+
+func fromCommonToAnyOptions[T any](source uiOptions, target *T) {
+ var buf bytes.Buffer
+ enc := gob.NewEncoder(&buf)
+ dec := gob.NewDecoder(&buf)
+ err := enc.Encode(source)
+ if err != nil {
+ panic(err)
+ }
+
+ err = dec.Decode(target)
+ if err != nil {
+ panic(err)
+ }
+}
+
+// UIOption can be applied to UI serving middleware, such as Context.APIHandler or
+// Context.APIHandlerSwaggerUI to alter the defaut behavior.
+type UIOption func(*uiOptions)
+
+func uiOptionsWithDefaults(opts []UIOption) uiOptions {
+ var o uiOptions
+ for _, apply := range opts {
+ apply(&o)
+ }
+
+ return o
+}
+
+// WithUIBasePath sets the base path from where to serve the UI assets.
+//
+// By default, Context middleware sets this value to the API base path.
+func WithUIBasePath(base string) UIOption {
+ return func(o *uiOptions) {
+ if !strings.HasPrefix(base, "/") {
+ base = "/" + base
+ }
+ o.BasePath = base
+ }
+}
+
+// WithUIPath sets the path from where to serve the UI assets (i.e. /{basepath}/{path}.
+func WithUIPath(pth string) UIOption {
+ return func(o *uiOptions) {
+ o.Path = pth
+ }
+}
+
+// WithUISpecURL sets the path from where to serve swagger spec document.
+//
+// This may be specified as a full URL or a path.
+//
+// By default, this is "/swagger.json"
+func WithUISpecURL(specURL string) UIOption {
+ return func(o *uiOptions) {
+ o.SpecURL = specURL
+ }
+}
+
+// WithUITitle sets the title of the UI.
+//
+// By default, Context middleware sets this value to the title found in the API spec.
+func WithUITitle(title string) UIOption {
+ return func(o *uiOptions) {
+ o.Title = title
+ }
+}
+
+// WithTemplate allows to set a custom template for the UI.
+//
+// UI middleware will panic if the template does not parse or execute properly.
+func WithTemplate(tpl string) UIOption {
+ return func(o *uiOptions) {
+ o.Template = tpl
+ }
+}
+
+// EnsureDefaults in case some options are missing
+func (r *uiOptions) EnsureDefaults() {
+ if r.BasePath == "" {
+ r.BasePath = "/"
+ }
+ if r.Path == "" {
+ r.Path = defaultDocsPath
+ }
+ if r.SpecURL == "" {
+ r.SpecURL = defaultDocsURL
+ }
+ if r.Title == "" {
+ r.Title = defaultDocsTitle
+ }
+}
+
+// serveUI creates a middleware that serves a templated asset as text/html.
+func serveUI(pth string, assets []byte, next http.Handler) http.Handler {
+ return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+ if path.Clean(r.URL.Path) == pth {
+ rw.Header().Set(contentTypeHeader, "text/html; charset=utf-8")
+ rw.WriteHeader(http.StatusOK)
+ _, _ = rw.Write(assets)
+
+ return
+ }
+
+ if next != nil {
+ next.ServeHTTP(rw, r)
+
+ return
+ }
+
+ rw.Header().Set(contentTypeHeader, "text/plain")
+ rw.WriteHeader(http.StatusNotFound)
+ _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth)))
+ })
+}
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go
index 39a85f7d9e8..7b7269bd195 100644
--- a/test/tools/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go
+++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go
@@ -197,30 +197,31 @@ func (d *API) Validate() error {
// validateWith validates the registrations in this API against the provided spec analyzer
func (d *API) validate() error {
- var consumes []string
+ consumes := make([]string, 0, len(d.consumers))
for k := range d.consumers {
consumes = append(consumes, k)
}
- var produces []string
+ produces := make([]string, 0, len(d.producers))
for k := range d.producers {
produces = append(produces, k)
}
- var authenticators []string
+ authenticators := make([]string, 0, len(d.authenticators))
for k := range d.authenticators {
authenticators = append(authenticators, k)
}
- var operations []string
+ operations := make([]string, 0, len(d.operations))
for m, v := range d.operations {
for p := range v {
operations = append(operations, fmt.Sprintf("%s %s", strings.ToUpper(m), p))
}
}
- var definedAuths []string
- for k := range d.spec.Spec().SecurityDefinitions {
+ secDefinitions := d.spec.Spec().SecurityDefinitions
+ definedAuths := make([]string, 0, len(secDefinitions))
+ for k := range secDefinitions {
definedAuths = append(definedAuths, k)
}
@@ -267,7 +268,7 @@ func (d *API) verify(name string, registrations []string, expectations []string)
delete(expected, k)
}
- var unregistered []string
+ unregistered := make([]string, 0, len(expected))
for k := range expected {
unregistered = append(unregistered, k)
}
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/middleware/validation.go b/test/tools/vendor/github.com/go-openapi/runtime/middleware/validation.go
index 1f0135b5788..0a5356c6075 100644
--- a/test/tools/vendor/github.com/go-openapi/runtime/middleware/validation.go
+++ b/test/tools/vendor/github.com/go-openapi/runtime/middleware/validation.go
@@ -35,7 +35,6 @@ type validation struct {
// ContentType validates the content type of a request
func validateContentType(allowed []string, actual string) error {
- debugLog("validating content type for %q against [%s]", actual, strings.Join(allowed, ", "))
if len(allowed) == 0 {
return nil
}
@@ -57,13 +56,13 @@ func validateContentType(allowed []string, actual string) error {
}
func validateRequest(ctx *Context, request *http.Request, route *MatchedRoute) *validation {
- debugLog("validating request %s %s", request.Method, request.URL.EscapedPath())
validate := &validation{
context: ctx,
request: request,
route: route,
bound: make(map[string]interface{}),
}
+ validate.debugLogf("validating request %s %s", request.Method, request.URL.EscapedPath())
validate.contentType()
if len(validate.result) == 0 {
@@ -76,8 +75,12 @@ func validateRequest(ctx *Context, request *http.Request, route *MatchedRoute) *
return validate
}
+func (v *validation) debugLogf(format string, args ...any) {
+ v.context.debugLogf(format, args...)
+}
+
func (v *validation) parameters() {
- debugLog("validating request parameters for %s %s", v.request.Method, v.request.URL.EscapedPath())
+ v.debugLogf("validating request parameters for %s %s", v.request.Method, v.request.URL.EscapedPath())
if result := v.route.Binder.Bind(v.request, v.route.Params, v.route.Consumer, v.bound); result != nil {
if result.Error() == "validation failure list" {
for _, e := range result.(*errors.Validation).Value.([]interface{}) {
@@ -91,7 +94,7 @@ func (v *validation) parameters() {
func (v *validation) contentType() {
if len(v.result) == 0 && runtime.HasBody(v.request) {
- debugLog("validating body content type for %s %s", v.request.Method, v.request.URL.EscapedPath())
+ v.debugLogf("validating body content type for %s %s", v.request.Method, v.request.URL.EscapedPath())
ct, _, req, err := v.context.ContentType(v.request)
if err != nil {
v.result = append(v.result, err)
@@ -100,6 +103,7 @@ func (v *validation) contentType() {
}
if len(v.result) == 0 {
+ v.debugLogf("validating content type for %q against [%s]", ct, strings.Join(v.route.Consumes, ", "))
if err := validateContentType(v.route.Consumes, ct); err != nil {
v.result = append(v.result, err)
}
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/request.go b/test/tools/vendor/github.com/go-openapi/runtime/request.go
index 078fda17396..9e3e1ecb148 100644
--- a/test/tools/vendor/github.com/go-openapi/runtime/request.go
+++ b/test/tools/vendor/github.com/go-openapi/runtime/request.go
@@ -16,6 +16,8 @@ package runtime
import (
"bufio"
+ "context"
+ "errors"
"io"
"net/http"
"strings"
@@ -96,10 +98,16 @@ func (p *peekingReader) Read(d []byte) (int, error) {
if p == nil {
return 0, io.EOF
}
+ if p.underlying == nil {
+ return 0, io.ErrUnexpectedEOF
+ }
return p.underlying.Read(d)
}
func (p *peekingReader) Close() error {
+ if p.underlying == nil {
+ return errors.New("reader already closed")
+ }
p.underlying = nil
if p.orig != nil {
return p.orig.Close()
@@ -107,9 +115,11 @@ func (p *peekingReader) Close() error {
return nil
}
-// JSONRequest creates a new http request with json headers set
+// JSONRequest creates a new http request with json headers set.
+//
+// It uses context.Background.
func JSONRequest(method, urlStr string, body io.Reader) (*http.Request, error) {
- req, err := http.NewRequest(method, urlStr, body)
+ req, err := http.NewRequestWithContext(context.Background(), method, urlStr, body)
if err != nil {
return nil, err
}
diff --git a/test/tools/vendor/github.com/go-openapi/runtime/security/authenticator.go b/test/tools/vendor/github.com/go-openapi/runtime/security/authenticator.go
index c3ffdac7e87..bb30472bbee 100644
--- a/test/tools/vendor/github.com/go-openapi/runtime/security/authenticator.go
+++ b/test/tools/vendor/github.com/go-openapi/runtime/security/authenticator.go
@@ -25,12 +25,13 @@ import (
)
const (
- query = "query"
- header = "header"
+ query = "query"
+ header = "header"
+ accessTokenParam = "access_token"
)
// HttpAuthenticator is a function that authenticates a HTTP request
-func HttpAuthenticator(handler func(*http.Request) (bool, interface{}, error)) runtime.Authenticator {
+func HttpAuthenticator(handler func(*http.Request) (bool, interface{}, error)) runtime.Authenticator { //nolint:revive,stylecheck
return runtime.AuthenticatorFunc(func(params interface{}) (bool, interface{}, error) {
if request, ok := params.(*http.Request); ok {
return handler(request)
@@ -158,7 +159,7 @@ func APIKeyAuth(name, in string, authenticate TokenAuthentication) runtime.Authe
inl := strings.ToLower(in)
if inl != query && inl != header {
// panic because this is most likely a typo
- panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\"."))
+ panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\""))
}
var getToken func(*http.Request) string
@@ -186,7 +187,7 @@ func APIKeyAuthCtx(name, in string, authenticate TokenAuthenticationCtx) runtime
inl := strings.ToLower(in)
if inl != query && inl != header {
// panic because this is most likely a typo
- panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\"."))
+ panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\""))
}
var getToken func(*http.Request) string
@@ -226,12 +227,12 @@ func BearerAuth(name string, authenticate ScopedTokenAuthentication) runtime.Aut
}
if token == "" {
qs := r.Request.URL.Query()
- token = qs.Get("access_token")
+ token = qs.Get(accessTokenParam)
}
//#nosec
ct, _, _ := runtime.ContentType(r.Request.Header)
if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") {
- token = r.Request.FormValue("access_token")
+ token = r.Request.FormValue(accessTokenParam)
}
if token == "" {
@@ -256,12 +257,12 @@ func BearerAuthCtx(name string, authenticate ScopedTokenAuthenticationCtx) runti
}
if token == "" {
qs := r.Request.URL.Query()
- token = qs.Get("access_token")
+ token = qs.Get(accessTokenParam)
}
//#nosec
ct, _, _ := runtime.ContentType(r.Request.Header)
if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") {
- token = r.Request.FormValue("access_token")
+ token = r.Request.FormValue(accessTokenParam)
}
if token == "" {
diff --git a/test/tools/vendor/github.com/go-openapi/spec/.gitignore b/test/tools/vendor/github.com/go-openapi/spec/.gitignore
index dd91ed6a04e..f47cb2045f1 100644
--- a/test/tools/vendor/github.com/go-openapi/spec/.gitignore
+++ b/test/tools/vendor/github.com/go-openapi/spec/.gitignore
@@ -1,2 +1 @@
-secrets.yml
-coverage.out
+*.out
diff --git a/test/tools/vendor/github.com/go-openapi/spec/.golangci.yml b/test/tools/vendor/github.com/go-openapi/spec/.golangci.yml
index 835d55e7425..22f8d21cca1 100644
--- a/test/tools/vendor/github.com/go-openapi/spec/.golangci.yml
+++ b/test/tools/vendor/github.com/go-openapi/spec/.golangci.yml
@@ -11,7 +11,7 @@ linters-settings:
threshold: 200
goconst:
min-len: 2
- min-occurrences: 2
+ min-occurrences: 3
linters:
enable-all: true
@@ -40,3 +40,22 @@ linters:
- tparallel
- thelper
- ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/test/tools/vendor/github.com/go-openapi/spec/README.md b/test/tools/vendor/github.com/go-openapi/spec/README.md
index 18782c6dafe..7fd2810c698 100644
--- a/test/tools/vendor/github.com/go-openapi/spec/README.md
+++ b/test/tools/vendor/github.com/go-openapi/spec/README.md
@@ -1,8 +1,5 @@
-# OAI object model
+# OpenAPI v2 object model [](https://github.com/go-openapi/spec/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/spec)
-[](https://travis-ci.org/go-openapi/spec)
-
-[](https://codecov.io/gh/go-openapi/spec)
[](https://slackin.goswagger.io)
[](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE)
[](https://pkg.go.dev/github.com/go-openapi/spec)
@@ -32,3 +29,26 @@ The object model for OpenAPI specification documents.
> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story.
>
> An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3
+
+* Does the unmarshaling support YAML?
+
+> Not directly. The exposed types know only how to unmarshal from JSON.
+>
+> In order to load a YAML document as a Swagger spec, you need to use the loaders provided by
+> github.com/go-openapi/loads
+>
+> Take a look at the example there: https://pkg.go.dev/github.com/go-openapi/loads#example-Spec
+>
+> See also https://github.com/go-openapi/spec/issues/164
+
+* How can I validate a spec?
+
+> Validation is provided by [the validate package](http://github.com/go-openapi/validate)
+
+* Why do we have an `ID` field for `Schema` which is not part of the swagger spec?
+
+> We found jsonschema compatibility more important: since `id` in jsonschema influences
+> how `$ref` are resolved.
+> This `id` does not conflict with any property named `id`.
+>
+> See also https://github.com/go-openapi/spec/issues/23
diff --git a/test/tools/vendor/github.com/go-openapi/spec/appveyor.yml b/test/tools/vendor/github.com/go-openapi/spec/appveyor.yml
deleted file mode 100644
index 0903593916e..00000000000
--- a/test/tools/vendor/github.com/go-openapi/spec/appveyor.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-version: "0.1.{build}"
-
-clone_folder: C:\go-openapi\spec
-shallow_clone: true # for startup speed
-pull_requests:
- do_not_increment_build_number: true
-
-#skip_tags: true
-#skip_branch_with_pr: true
-
-# appveyor.yml
-build: off
-
-environment:
- GOPATH: c:\gopath
-
-stack: go 1.15
-
-test_script:
- - go test -v -timeout 20m ./...
-
-deploy: off
-
-notifications:
- - provider: Slack
- incoming_webhook: https://hooks.slack.com/services/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
- auth_token:
- secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4=
- channel: bots
- on_build_success: false
- on_build_failure: true
- on_build_status_changed: true
diff --git a/test/tools/vendor/github.com/go-openapi/spec/bindata.go b/test/tools/vendor/github.com/go-openapi/spec/bindata.go
deleted file mode 100644
index afc83850c2e..00000000000
--- a/test/tools/vendor/github.com/go-openapi/spec/bindata.go
+++ /dev/null
@@ -1,297 +0,0 @@
-// Code generated by go-bindata. DO NOT EDIT.
-// sources:
-// schemas/jsonschema-draft-04.json (4.357kB)
-// schemas/v2/schema.json (40.248kB)
-
-package spec
-
-import (
- "bytes"
- "compress/gzip"
- "crypto/sha256"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
- "time"
-)
-
-func bindataRead(data []byte, name string) ([]byte, error) {
- gz, err := gzip.NewReader(bytes.NewBuffer(data))
- if err != nil {
- return nil, fmt.Errorf("read %q: %v", name, err)
- }
-
- var buf bytes.Buffer
- _, err = io.Copy(&buf, gz)
- clErr := gz.Close()
-
- if err != nil {
- return nil, fmt.Errorf("read %q: %v", name, err)
- }
- if clErr != nil {
- return nil, err
- }
-
- return buf.Bytes(), nil
-}
-
-type asset struct {
- bytes []byte
- info os.FileInfo
- digest [sha256.Size]byte
-}
-
-type bindataFileInfo struct {
- name string
- size int64
- mode os.FileMode
- modTime time.Time
-}
-
-func (fi bindataFileInfo) Name() string {
- return fi.name
-}
-func (fi bindataFileInfo) Size() int64 {
- return fi.size
-}
-func (fi bindataFileInfo) Mode() os.FileMode {
- return fi.mode
-}
-func (fi bindataFileInfo) ModTime() time.Time {
- return fi.modTime
-}
-func (fi bindataFileInfo) IsDir() bool {
- return false
-}
-func (fi bindataFileInfo) Sys() interface{} {
- return nil
-}
-
-var _jsonschemaDraft04Json = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x57\x3d\x6f\xdb\x3c\x10\xde\xf3\x2b\x08\x26\x63\xf2\x2a\x2f\xd0\xc9\x5b\xd1\x2e\x01\x5a\x34\x43\x37\x23\x03\x6d\x9d\x6c\x06\x14\xa9\x50\x54\x60\xc3\xd0\x7f\x2f\x28\x4a\x14\x29\x91\x92\x2d\xa7\x8d\x97\x28\xbc\xaf\xe7\x8e\xf7\xc5\xd3\x0d\x42\x08\x61\x9a\xe2\x15\xc2\x7b\xa5\x8a\x55\x92\xbc\x96\x82\x3f\x94\xdb\x3d\xe4\xe4\x3f\x21\x77\x49\x2a\x49\xa6\x1e\x1e\xbf\x24\xe6\xec\x16\xdf\x1b\xa1\x3b\xf3\xff\x02\xc9\x14\xca\xad\xa4\x85\xa2\x82\x6b\xe9\x6f\x42\x02\x32\x2c\x28\x07\x45\x5a\x15\x3d\x77\x46\x39\xd5\xcc\x25\x5e\x21\x83\xb8\x21\x18\xb6\xaf\x52\x92\xa3\x47\x68\x88\xea\x58\x80\x56\x4e\x1a\xf2\xbd\x4f\xcc\x29\x7f\x52\x90\x6b\x7d\xff\x0f\x48\xb4\x3d\x3f\x21\x7c\x27\x21\xd3\x2a\x6e\x31\xaa\x2d\x53\xdd\xf3\xe3\x42\x94\x54\xd1\x77\x78\xe2\x0a\x76\x20\xe3\x20\x68\xcb\x30\x86\x41\xf3\x2a\xc7\x2b\xf4\x78\x8e\xfe\xef\x90\x91\x8a\xa9\xc7\xb1\x1d\xc2\xd8\x2f\x0d\x75\xed\xc1\x4e\x9c\xc8\x25\x43\xac\xa8\xbe\xd7\xcc\xa9\xd1\xa9\x21\xa0\x1a\xbd\x04\x61\x94\x34\x2f\x18\xfc\x3e\x16\x50\x8e\x4d\x03\x6f\x1c\x58\xdb\x48\x23\xbc\x11\x82\x01\xe1\xfa\xd3\x3a\x8e\x30\xaf\x18\x33\x7f\xf3\x8d\x39\x11\x9b\x57\xd8\x2a\xfd\x55\x2a\x49\xf9\x0e\xc7\xec\x37\xd4\x25\xf7\xec\x5c\x66\xc7\xd7\x99\xaa\xcf\x4f\x89\x8a\xd3\xb7\x0a\x3a\xaa\x92\x15\xf4\x30\x6f\x1c\xb0\xd6\x46\xe7\x98\x39\x2d\xa4\x28\x40\x2a\x3a\x88\x9e\x29\xba\x88\x37\x2d\xca\x60\x38\xfa\xba\x5b\x20\xac\xa8\x62\xb0\x4c\xd4\xaf\xda\x45\x0a\xba\x5c\x3b\xb9\xc7\x79\xc5\x14\x2d\x18\x34\x19\x1c\x51\xdb\x25\x4d\xb4\x7e\x06\x14\x38\x6c\x59\x55\xd2\x77\xf8\x69\x59\xfc\x7b\x73\xed\x93\x43\xcb\x32\x6d\x3c\x28\xdc\x1b\x9a\xd3\x62\xab\xc2\x27\xf7\x41\xc9\x08\x2b\x23\x08\xad\x13\x57\x21\x9c\xd3\x72\x0d\x42\x72\xf8\x01\x7c\xa7\xf6\x83\xce\x39\xd7\x82\x3c\x1f\x2f\xd6\x60\x1b\xa2\xdf\x35\x89\x52\x20\xe7\x73\x74\xe0\x66\x26\x64\x4e\xb4\x97\x58\xc2\x0e\x0e\xe1\x60\x92\x34\x6d\xa0\x10\xd6\xb5\x83\x61\x27\xe6\x47\xd3\x89\xbd\x63\xfd\x3b\x8d\x03\x3d\x6c\x42\x2d\x5b\x70\xee\xe8\xdf\x
4b\xf4\x66\x4e\xe1\x01\x45\x17\x80\x74\xad\x4f\xc3\xf3\xae\xc6\x1d\xc6\xd7\xc2\xce\xc9\xe1\x29\x30\x86\x2f\x4a\xa6\x4b\x15\x84\x73\xc9\x6f\xfd\x7f\xa5\x6e\x9e\xbd\xf1\xb0\xd4\xdd\x45\x5a\xc2\x3e\x4b\x78\xab\xa8\x84\x74\x4a\x91\x3b\x92\x23\x05\xf2\x1c\x1e\x7b\xf3\x09\xf8\xcf\xab\x24\xb6\x60\xa2\xe8\x4c\x9f\x75\x77\xaa\x8c\xe6\x01\x45\x36\x86\xcf\xc3\x63\x3a\xea\xd4\x8d\x7e\x06\xac\x14\x0a\xe0\x29\xf0\xed\x07\x22\x1a\x65\xda\x44\xae\xa2\x73\x1a\xe6\x90\x69\xa2\x8c\x46\xb2\x2f\xde\x49\x38\x08\xed\xfe\xfd\x41\xaf\x9f\xa9\x55\xd7\xdd\x22\x8d\xfa\x45\x63\xc5\x0f\x80\xf3\xb4\x08\xd6\x79\x30\x9e\x93\xee\x59\xa6\xd0\x4b\xee\x22\xe3\x33\xc1\x3a\x27\x68\x36\x78\x7e\x87\x0a\x06\xd5\x2e\x20\xd3\xaf\x15\xfb\xd8\x3b\x73\x14\xbb\x92\xed\x05\x5d\x2e\x29\x38\x2c\x94\xe4\x42\x45\x5e\xd3\xb5\x7d\xdf\x47\xca\x38\xb4\x5c\xaf\xfb\x7d\xdd\x6d\xf4\xa1\x2d\x77\xdd\x2f\xce\x6d\xc4\x7b\x8b\x4e\x67\xa9\x6f\xfe\x04\x00\x00\xff\xff\xb1\xd1\x27\x78\x05\x11\x00\x00")
-
-func jsonschemaDraft04JsonBytes() ([]byte, error) {
- return bindataRead(
- _jsonschemaDraft04Json,
- "jsonschema-draft-04.json",
- )
-}
-
-func jsonschemaDraft04Json() (*asset, error) {
- bytes, err := jsonschemaDraft04JsonBytes()
- if err != nil {
- return nil, err
- }
-
- info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(0640), modTime: time.Unix(1568963823, 0)}
- a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe1, 0x48, 0x9d, 0xb, 0x47, 0x55, 0xf0, 0x27, 0x93, 0x30, 0x25, 0x91, 0xd3, 0xfc, 0xb8, 0xf0, 0x7b, 0x68, 0x93, 0xa8, 0x2a, 0x94, 0xf2, 0x48, 0x95, 0xf8, 0xe4, 0xed, 0xf1, 0x1b, 0x82, 0xe2}}
- return a, nil
-}
-
-var _v2SchemaJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5d\x4f\x93\xdb\x36\xb2\xbf\xfb\x53\xa0\x14\x57\xd9\xae\xd8\x92\xe3\xf7\x2e\xcf\x97\xd4\xbc\xd8\x49\x66\x37\x5e\x4f\x79\x26\xbb\x87\x78\x5c\x05\x91\x2d\x09\x09\x09\x30\x00\x38\x33\x5a\xef\x7c\xf7\x2d\xf0\x9f\x08\x02\x20\x41\x8a\xd2\xc8\x0e\x0f\xa9\x78\x28\xa0\xd1\xdd\x68\x34\x7e\xdd\xf8\xf7\xf9\x11\x42\x33\x49\x64\x04\xb3\xd7\x68\x76\x86\xfe\x76\xf9\xfe\x1f\xe8\x32\xd8\x40\x8c\xd1\x8a\x71\x74\x79\x8b\xd7\x6b\xe0\xe8\xd5\xfc\x25\x3a\xbb\x38\x9f\xcf\x9e\xab\x0a\x24\x54\xa5\x37\x52\x26\xaf\x17\x0b\x91\x17\x99\x13\xb6\xb8\x79\xb5\x10\x59\xdd\xf9\xef\x82\xd1\x6f\xf2\xc2\x8f\xf3\x4f\xb5\x1a\xea\xc7\x17\x45\x41\xc6\xd7\x8b\x90\xe3\x95\x7c\xf1\xf2\x7f\x8b\xca\x45\x3d\xb9\x4d\x32\xa6\xd8\xf2\x77\x08\x64\xfe\x8d\xc3\x9f\x29\xe1\xa0\x9a\xff\xed\x11\x42\x08\xcd\x8a\xd6\xb3\x9f\x15\x67\x74\xc5\xca\x7f\x27\x58\x6e\xc4\xec\x11\x42\xd7\x59\x5d\x1c\x86\x44\x12\x46\x71\x74\xc1\x59\x02\x5c\x12\x10\xb3\xd7\x68\x85\x23\x01\x59\x81\x04\x4b\x09\x9c\x6a\xbf\x7e\xce\x49\x7d\xba\x7b\x51\xfd\xa1\x44\xe2\xb0\x52\xac\x7d\xb3\x08\x61\x45\x68\x46\x56\x2c\x6e\x80\x86\x8c\xbf\xbd\x93\x40\x05\x61\x74\x96\x95\xbe\x7f\x84\xd0\x7d\x4e\xde\x42\xb7\xe4\xbe\x46\xbb\x14\x5b\x48\x4e\xe8\xba\x90\x05\xa1\x19\xd0\x34\xae\xc4\xce\xbe\xbc\x9a\xbf\x9c\x15\x7f\x5d\x57\xc5\x42\x10\x01\x27\x89\xe2\x48\x51\xb9\xda\x40\xd5\x87\x37\xc0\x15\x5f\x88\xad\x90\xdc\x10\x81\x42\x16\xa4\x31\x50\x39\x2f\x38\xad\xab\xb0\x53\xd8\xac\x94\x56\x6f\xc3\x84\xf4\x11\xa4\x50\xb3\xfa\xe9\xd3\x6f\x9f\x3e\xdf\x2f\xd0\xeb\x8f\x1f\x3f\x7e\xbc\xfe\xf6\xe9\xf7\xaf\x5f\x7f\xfc\x18\x7e\xfb\xec\xfb\xc7\xb3\x36\x79\x54\x43\xe8\x29\xc5\x31\x20\xc6\x11\x49\x9e\xe5\x12\x41\x66\xa0\xe8\xed\x1d\x8e\x93\x08\x5e\xa3\x27\x3b\xc3\x7c\xa2\x73\xba\xc4\x02\x2e\xb0\xdc\xf4\xe5\x76\xd1\xca\x96\xa2\x8a\x94\xcd\x21\xc9\x6c\xec\x2c\x70\x42\x9e\x34\x74\x9d\x19\x7c\xcd\x20\x9c\xea\x2e\x0a\xfe\x42\x84\xd4\x29\x04\x8c\x8a\xb4\x41\xa2\xc1\xdc\x19\x8a\x88\x90\x4a\x49\xef\xce\xdf\xbd\x45\x4a\x52\x8
1\x70\x10\x40\x22\x21\x44\xcb\x6d\xc5\xec\x4e\x3c\x1c\x45\xef\x57\x9a\xb5\x7d\xae\xfe\xe5\xe4\x31\x86\x90\xe0\xab\x6d\x02\x3b\x2e\xcb\x11\x90\xd9\xa8\xc6\x77\xc2\x59\x98\x06\xfd\xf9\x2e\x78\x45\x01\xa6\xa8\xa0\x71\x5c\xbe\x33\xa7\xd2\xd9\x5f\x95\xef\xd9\xd5\xac\xfd\xdc\x5d\xbf\x5e\xb8\xd1\x3e\xc7\x31\x48\xe0\x5e\x4c\x14\x65\xdf\xb8\xa8\x71\x10\x09\xa3\xc2\xc7\x02\xcb\xa2\x4e\x5a\x02\x82\x94\x13\xb9\xf5\x30\xe6\xb2\xa4\xb5\xfe\x9b\x3e\x7a\xb2\x55\xd2\xa8\x4a\xbc\x16\xb6\x71\x8e\x39\xc7\xdb\x9d\xe1\x10\x09\x71\xbd\x9c\xb3\x41\x89\xd7\xa5\x89\xdc\x57\xb5\x53\x4a\xfe\x4c\xe1\xbc\xa0\x21\x79\x0a\x1a\x0f\x70\xa7\x5c\x08\x8e\xde\xb0\xc0\x43\x24\xad\x74\x63\x0e\xb1\xd9\x90\xe1\xb0\x2d\x13\xa7\x6d\x78\xfd\x04\x14\x38\x8e\x90\xaa\xce\x63\xac\x3e\x23\xbc\x64\xa9\xb4\xf8\x03\x63\xde\xcd\xbe\x16\x13\x4a\x55\xac\x82\x12\xc6\xac\xd4\x35\xf7\x22\xd4\x3a\xff\x22\x73\x0e\x6e\x51\xa0\x75\x1e\xae\x8f\xe8\x5d\xc7\x59\xe6\xe4\x9a\x18\x8d\xd6\x1c\x53\x84\x4d\xb7\x67\x28\x37\x09\x84\x69\x88\x12\x0e\x01\x11\x80\x32\xa2\xf5\xb9\xaa\xc6\xd9\x73\x53\xab\xfb\xb4\x2e\x20\xc6\x54\x92\xa0\x9a\xf3\x69\x1a\x2f\x81\x77\x37\xae\x53\x1a\xce\x40\xc4\xa8\x82\x1c\xb5\xef\xda\x24\x7d\xb9\x61\x69\x14\xa2\x25\xa0\x90\xac\x56\xc0\x81\x4a\xb4\xe2\x2c\xce\x4a\x64\x7a\x9a\x23\xf4\x13\x91\x3f\xa7\x4b\xf4\x63\x84\x6f\x18\x87\x10\xbd\xc3\xfc\x8f\x90\xdd\x52\x44\x04\xc2\x51\xc4\x6e\x21\x74\x48\x21\x81\xc7\xe2\xfd\xea\x12\xf8\x0d\x09\xf6\xe9\x47\x35\xaf\x67\xc4\x14\xf7\x22\x27\x97\xe1\xe2\x76\x2d\x06\x8c\x4a\x1c\x48\x3f\x73\x2d\x0b\x5b\x29\x45\x24\x00\x2a\x0c\x11\xec\x94\xca\xc2\xa6\xc1\x37\x21\x43\x83\x3b\x5f\x97\xf1\x43\x5e\x53\x73\x19\xa5\x36\xd8\x2d\x05\x2e\x34\x0b\xeb\x39\xfc\x1d\x63\x51\x01\xbd\x3d\xbb\x90\x84\x40\x25\x59\x6d\x09\x5d\xa3\x1c\x37\xe6\x5c\x16\x9a\x40\x09\x70\xc1\xe8\x82\xf1\x35\xa6\xe4\xdf\x99\x5c\x8e\x9e\x4d\x79\xb4\x27\x2f\xbf\x7e\xf8\x05\x25\x8c\x50\xa9\x98\x29\x90\x62\x60\xea\x75\xae\x13\xca\xbf\x2b\x1a\x29\x27\x76\xd6\x20\xc6\x64\x5f\xe6\x32\x1a\x08\x87\x21\x07\x21\xbc\xb4\xe4\xe0\x32\x67\xa
6\xcd\xf3\x1e\xcd\xd9\x6b\xb6\x6f\x8e\x27\xa7\xed\xdb\xe7\xbc\xcc\x1a\x07\xce\x6f\x87\x33\xf0\xba\x51\x17\x22\x66\x78\x79\x8e\xce\xe5\x13\x81\x80\x06\x2c\xe5\x78\x0d\xa1\xb2\xb8\x54\xa8\x79\x09\xbd\xbf\x3c\x47\x01\x8b\x13\x2c\xc9\x32\xaa\xaa\x1d\xd5\xee\xab\x36\xbd\x6c\xfd\x54\x6c\xc8\x08\x01\x3c\xbd\xe7\x07\x88\xb0\x24\x37\x79\x90\x28\x4a\x1d\x10\x1a\x92\x1b\x12\xa6\x38\x42\x40\xc3\x4c\x43\x62\x8e\xae\x36\xb0\x45\x71\x2a\xa4\x9a\x23\x79\x59\xb1\xa8\xf2\xa4\x0c\x60\x9f\xcc\x8d\x40\xf5\x80\xca\xa8\x99\xc3\xa7\x85\x1f\x31\x25\xa9\x82\xc5\x6d\xbd\xd8\x36\x76\x7c\x02\x28\x97\xf6\x1d\x74\x3b\x11\x7e\x91\xae\x32\xf8\x6c\xf4\xe6\x7b\x9a\xa5\x1f\x62\xc6\x21\xcf\x9a\xe5\xed\x8b\x02\xf3\x2c\x33\x33\xdf\x00\xca\xc9\x09\xb4\x04\xf5\xa5\x08\xd7\xc3\x02\x18\x66\xf1\xab\x1e\x83\x37\x4c\xcd\x12\xc1\x1d\x50\xf6\xaa\xbd\xfe\xe2\x73\x48\x38\x08\xa0\x32\x9b\x18\x44\x86\x0b\x6a\xc1\xaa\x26\x96\x2d\x96\x3c\xa0\x54\x65\x73\xe3\x08\xb5\x8b\x99\xbd\x82\xbc\x9e\xc2\xe8\x53\x46\x83\x3f\x33\x54\x2b\x5b\xad\x92\x79\xd9\x8f\x5d\x93\x98\xf2\xe6\xc6\x1c\xe6\x9a\x9e\xfc\x43\x82\x31\x66\x8e\x53\x77\xfe\x90\xe7\xf3\xf6\xe9\x62\x23\x3f\x10\x93\x18\xae\x72\x1a\x9d\xf9\x48\xcb\xcc\x5a\x65\xc7\x4a\x04\xf0\xf3\xd5\xd5\x05\x8a\x41\x08\xbc\x86\x86\x43\x51\x6c\xe0\x46\x57\xf6\x44\x40\x0d\xfb\xff\xa2\xc3\x7c\x3d\x39\x84\xdc\x09\x22\x64\x4f\x12\xd9\xba\xaa\xf6\xe3\xbd\x56\xdd\x91\x25\x6a\x14\x9c\x89\x34\x8e\x31\xdf\xee\x15\x7e\x2f\x39\x81\x15\x2a\x28\x95\x66\x51\xf5\xfd\x83\xc5\xfe\x15\x07\xcf\xf7\x08\xee\x1d\x8e\xb6\xc5\x52\xcc\x8c\x5a\x93\x66\xc5\xd8\x79\x38\x46\xd6\xa7\x88\x37\xc9\x2e\xe3\xd2\xa5\x7b\x4b\x3a\xdc\xa1\xdc\x9e\x29\xf1\x8c\x8a\x99\x16\x47\x8d\xd4\x78\x8b\xf6\x1c\xe9\x71\x54\x1b\x69\xa8\x4a\x93\x37\xe5\xb2\x2c\x4f\x0c\x92\xab\xa0\x73\x32\x72\x59\xd3\xf0\x2d\x8d\xed\xca\x37\x16\x19\x9e\xdb\x1c\xab\x17\x49\xc3\x0f\x37\xdc\x88\xb1\xb4\xd4\x42\xcb\x58\x5e\x6a\x52\x0b\x15\x10\x0a\xb0\x04\xe7\xf8\x58\x32\x16\x01\xa6\xcd\x01\xb2\xc2\x69\x24\x35\x38\x6f\x30\x6a\xae\x1b\xb4\x71\xaa\xad\x1d\xa0\xd6\x20\x2
d\x8b\x3c\xc6\x82\x62\x27\x34\x6d\x15\x84\x7b\x43\xb1\x35\x78\xa6\x24\x77\x28\xc1\x6e\xfc\xe9\x48\x74\xf4\x15\xe3\xe1\x84\x42\x88\x40\x7a\x26\x49\x3b\x48\xb1\xa4\x19\x8e\x0c\xa7\xb5\x01\x6c\x0c\x97\x61\x8a\xc2\x32\xd8\x8c\x44\x69\x24\xbf\x65\x1d\x74\xd6\xe5\x44\xef\xec\x48\x5e\xb7\x8a\xa3\x29\x8e\x41\x64\xce\x1f\x88\xdc\x00\x47\x4b\x40\x98\x6e\xd1\x0d\x8e\x48\x98\x63\x5c\x21\xb1\x4c\x05\x0a\x58\x98\xc5\x6d\x4f\x0a\x77\x53\x4f\x8b\xc4\x44\x1f\xb2\xdf\x8d\x3b\xea\x9f\xfe\xf6\xf2\xc5\xff\x5d\x7f\xfe\x9f\xfb\x67\x8f\xff\xf3\xe9\x69\xd1\xfe\xb3\xc7\xfd\x3c\xf8\x3f\x71\x94\x82\x23\xd1\x72\x00\xb7\x42\x99\x6c\xc0\x60\x7b\x0f\x79\xea\xa8\x53\x4b\x56\x31\xfa\x0b\x52\x9f\x96\xdb\xcd\x2f\xd7\x67\xcd\x04\x19\x85\xfe\xdb\x02\x9a\x59\x03\xad\x63\x3c\xea\xff\x2e\x18\xfd\x00\xd9\xe2\x56\x60\x59\x93\xb9\xb6\xb2\x3e\x3c\x2c\xab\x0f\xa7\xb2\x89\x43\xc7\xf6\xd5\xce\x2e\xad\xa6\xa9\xed\xa6\xc6\x5a\xb4\xa6\x67\xdf\x8c\x26\x7b\x50\x5a\x91\x08\x2e\x6d\xd4\x3a\xc1\x9d\xf2\xdb\xde\x1e\xb2\x2c\x6c\xa5\x64\xc9\x16\xb4\x90\xaa\x4a\xb7\x0c\xde\x13\xc3\x2a\x9a\x11\x9b\x7a\x1b\x3d\x95\x97\x37\x31\x6b\x69\x7e\x34\xc0\x67\x1f\x66\x19\x49\xef\xf1\x25\xf5\xac\x0e\xea\x0a\x28\x8d\x4d\x7e\xd9\x57\x4b\x49\xe5\xc6\xb3\x25\xfd\xe6\x57\x42\x25\xac\xcd\xcf\x36\x74\x8e\xca\x24\x47\xe7\x80\xa8\x92\x72\xbd\x3d\x84\x2d\x65\xe2\x82\x1a\x9c\xc4\x44\x92\x1b\x10\x79\x8a\xc4\x4a\x2f\x60\x51\x04\x81\xaa\xf0\xa3\x95\x27\xd7\x12\x7b\xa3\x96\x03\x45\x96\xc1\x8a\x07\xc9\xb2\xb0\x95\x52\x8c\xef\x48\x9c\xc6\x7e\x94\xca\xc2\x0e\x07\x12\x44\xa9\x20\x37\xf0\xae\x0f\x49\xa3\x96\x9d\x4b\x42\x7b\x70\x59\x14\xee\xe0\xb2\x0f\x49\xa3\x96\x4b\x97\xbf\x00\x5d\x4b\x4f\xfc\xbb\x2b\xee\x92\xb9\x17\xb5\xaa\xb8\x0b\x97\x17\x9b\x43\xfd\xd6\xc2\xb2\xc2\x2e\x29\xcf\xfd\x87\x4a\x55\xda\x25\x63\x1f\x5a\x65\x69\x2b\x2d\x3d\x67\xe9\x41\xae\x5e\xc1\x6e\x2b\xd4\xdb\x3e\xa8\xd3\x26\xd2\x48\x92\x24\xca\x61\x86\x8f\x8c\xbb\xf2\x8e\x91\xdf\x1f\x06\x19\x33\xf3\x03\x4d\xba\xcd\xe2\x2d\xfb\x69\xe9\x16\x15\x13\xd5\x56\x85\x4e\x3c\x5b\x8a\xbf\x25\x72\x83\xe
e\x5e\x20\x22\xf2\xc8\xaa\x7b\xdb\x8e\xe4\x29\x58\xca\x38\xb7\x3f\x2e\x59\xb8\xbd\xa8\x16\x16\xf7\xdb\x79\x51\x9f\x5a\xb4\x8d\x87\x3a\x6e\xbc\x3e\xc5\xb4\xcd\x58\xf9\xf5\x3c\xb9\x6f\x49\xaf\x57\xc1\xfa\x1c\x5d\x6d\x88\x8a\x8b\xd3\x28\xcc\xb7\xef\x10\x8a\x4a\x74\xa9\x4a\xa7\x62\xbf\x0d\x76\x23\x6f\x59\xd9\x31\xee\x40\x11\xfb\x28\xec\x8d\x22\x1c\x13\x5a\x64\x94\x23\x16\x60\xbb\xd2\x7c\xa0\x98\xb2\xe5\x6e\xbc\x54\x33\xe0\x3e\xb9\x52\x17\xdb\xb7\x1b\xc8\x12\x20\x8c\x23\xca\x64\x7e\x78\xa3\x62\x5b\x75\x56\xd9\x9e\x2a\x91\x27\xb0\x70\x34\x1f\x90\x89\xb5\x86\x73\x7e\x71\xda\x1e\xfb\x3a\x72\xdc\x5e\x79\x88\xcb\x74\x79\xd9\x64\xe4\xd4\xc2\x9e\xce\xb1\xfe\x85\x5a\xc0\xe9\x0c\x34\x3d\xd0\x43\xce\xa1\x36\x39\xd5\xa1\x4e\xf5\xf8\xb1\xa9\x23\x08\x75\x84\xac\x53\x6c\x3a\xc5\xa6\x53\x6c\x3a\xc5\xa6\x7f\xc5\xd8\xf4\x51\xfd\xff\x25\x4e\xfa\x33\x05\xbe\x9d\x60\xd2\x04\x93\x6a\x5f\x33\x9b\x98\x50\xd2\xe1\x50\x52\xc6\xcc\xdb\x38\x91\xdb\xe6\xaa\xa2\x8f\xa1\x6a\xa6\xd4\xc6\x56\xd6\x8c\x40\x02\x68\x48\xe8\x1a\xe1\x9a\xd9\x2e\xb7\x05\xc3\x34\xda\x2a\xbb\xcd\x12\x36\x98\x22\x50\x4c\xa1\x1b\xc5\xd5\x84\xf0\xbe\x24\x84\xf7\x2f\x22\x37\xef\x94\xd7\x9f\xa0\xde\x04\xf5\x26\xa8\x37\x41\x3d\x64\x40\x3d\xe5\xf2\xde\x60\x89\x27\xb4\x37\xa1\xbd\xda\xd7\xd2\x2c\x26\xc0\x37\x01\x3e\x1b\xef\x5f\x06\xe0\x6b\x7c\x5c\x91\x08\x26\x10\x38\x81\xc0\x09\x04\x76\x4a\x3d\x81\xc0\xbf\x12\x08\x4c\xb0\xdc\x7c\x99\x00\xd0\x75\x70\xb4\xf8\x5a\x7c\xea\xde\x3e\x39\x08\x30\x5a\x27\x35\xed\xb4\x65\xad\x69\x74\x10\x88\x79\xe2\x30\x52\x19\xd6\x04\x21\xa7\x95\xd5\x0e\x03\xf8\xda\x20\xd7\x84\xb4\x26\xa4\x35\x21\xad\x09\x69\x21\x03\x69\x51\x46\xff\xff\x18\x9b\x54\xed\x87\x47\x06\x9d\x4e\x73\x6e\x9a\xb3\xa9\xce\x83\x5e\x4b\xc6\x71\x20\x45\xd7\x72\xf5\x40\x72\x0e\x34\x6c\xf4\x6c\xf3\xba\x5e\x4b\x97\x0e\x52\xb8\xbe\x8b\x79\xa0\x10\x86\xa1\x75\xb0\x6f\xec\xc8\xf4\x3d\x4d\x7b\x86\xc2\x02\x31\x12\x51\xbf\x07\x94\xad\x10\xd6\x2e\x79\xcf\xe9\x1c\xf5\x1e\x31\x23\x5c\x18\xfb\x9c\xfb\x70\xe0\x62\xbd\xf7\xb5\x94\xcf\xf3\xf6\xfa\xc5\x4e\x9
c\x85\x76\x1d\xae\x37\xbc\xde\xa3\x41\xcb\x29\xd0\x5e\x70\x67\x50\x93\x6d\x98\xa8\xd3\x67\x0f\x68\xb1\xeb\x38\x47\x07\x10\x1b\xd2\xe2\x18\x68\x6d\x40\xbb\xa3\x40\xba\x21\xf2\x8e\x81\xfb\xf6\x92\x77\x2f\x70\xe8\xdb\xb2\x36\xbf\x30\x91\xc5\x21\xe7\x45\xcc\x34\x0c\x48\x8e\xd0\xf2\x9b\x7c\x3c\xbd\x1c\x04\x3e\x07\xe8\x7c\x2f\x84\x7a\x48\x4d\x1f\xba\xe1\x76\x45\x7b\x60\xe0\x01\xca\xee\x04\xca\x31\xbe\x73\x5f\xa3\x70\x0c\xad\x1f\xa5\xf5\x76\xd5\xbb\xd2\x7e\xfb\x30\x90\xcf\xfa\x67\x7a\xe6\xc3\x37\x42\x19\xe2\xc9\x9c\x61\x4c\xe7\xd1\x77\x55\x86\x6e\x8f\x7b\x85\x42\x33\xa3\xaa\x57\xae\xfd\xd5\xcc\x9c\x56\x68\xe2\xde\x0e\xa8\x2c\xa9\xb0\x7d\xf0\x54\x2d\x80\xf2\x48\x39\x3d\x98\x1a\x6d\x0b\x9d\xba\x53\xfb\xce\xf8\xd1\x7e\xbb\x60\x4f\x06\xf5\xce\xda\xab\xeb\xca\xcb\xd5\xac\x20\xda\x72\x3b\xa2\x4b\x38\xd7\xb5\x89\xbe\x42\xd9\xb9\x73\xc4\x0c\x6d\xb7\xd9\xf8\x8d\xbd\x3e\x9c\xf5\x53\x68\x48\x14\x36\x8f\x09\xc5\x92\xf1\x21\xd1\x09\x07\x1c\xbe\xa7\x91\xf3\x6a\xc8\xc1\x57\xb0\xdd\xc5\xc6\x1d\xad\x76\x1d\xa8\x82\x0e\x4c\x38\xfe\xa5\x8c\xc5\x0a\x40\x5d\xa1\xbb\x98\xd1\xfb\x74\x61\xed\x1a\x98\xaf\x3c\x8c\x1e\xe3\xc2\x92\x29\x74\x3e\x99\xd0\xf9\x41\x50\xd0\x38\x4b\x57\x7e\x5b\x7a\x0e\xe6\xce\x4e\xd7\x19\x35\x57\xbb\x3c\x3c\xd2\x5e\x4f\x4b\x4c\xf7\x0f\x4d\x2b\x91\x5d\x94\xa6\x95\xc8\x69\x25\x72\x5a\x89\x7c\xb8\x95\xc8\x07\x80\x8c\xda\x9c\x64\x7b\xb7\x71\xdf\x57\x12\x4b\x9a\x1f\x72\x0c\x13\x03\xad\x3c\xd5\x4e\xde\x8e\x57\x13\x6d\x34\x86\xcf\x97\xe6\xa4\x68\xc4\xb0\xf6\xc9\xc2\xeb\x8d\x0b\xd7\xcd\xfe\xba\xa6\xf5\x30\xeb\x30\x33\xbe\xc7\x56\x27\xab\x08\xd9\x6d\xbb\x09\xee\x7c\x2d\xcf\xee\x87\x38\xac\xc8\xdd\x90\x9a\x58\x4a\x4e\x96\xa9\x79\x79\xf3\xde\x20\xf0\x96\xe3\x24\x19\xeb\xba\xf2\x53\x19\xab\x12\xaf\x47\xb3\xa0\x3e\xef\x9b\x8d\x6d\x6d\x7b\xde\x3b\x3b\x1a\xc0\x3f\x95\x7e\xed\x78\xfb\x76\xb8\xaf\xb3\xdd\xc5\xeb\x95\xed\x5a\x62\x41\x82\xb3\x54\x6e\x80\x4a\x92\x6f\x36\xbd\x34\xae\xde\x6f\xa4\xc0\xbc\x08\xe3\x84\xfc\x1d\xb6\xe3\xd0\x62\x38\x95\x9b\x57\xe7\x71\x12\x91\x80\xc8\x31\x69\x5e\x60\x2
1\x6e\x19\x0f\xc7\xa4\x79\x96\x28\x3e\x47\x54\x65\x41\x36\x08\x40\x88\x1f\x58\x08\x56\xaa\xd5\xbf\xaf\xad\x96\xd7\xd6\xcf\x87\xf5\x34\x0f\x71\x93\x6e\x26\xed\x98\x5b\x9f\x4f\xcf\x95\x34\xc6\xd7\x11\xfa\xb0\x81\x22\x1a\xdb\xdf\x8e\xdc\xc3\xb9\xf8\xdd\x5d\x3c\x74\xe6\xea\xb7\x8b\xbf\xf5\x6e\xb3\x46\x2e\x64\xf4\xab\x3c\x4e\xcf\x36\x1d\xfe\xfa\xb8\x36\xba\x8a\xd8\xad\xf6\xc6\x41\x2a\x37\x8c\x17\x0f\xda\xfe\xda\xe7\x65\xbc\x71\x2c\x36\x57\x8a\x47\x12\x4c\xf1\xbd\x77\x6b\xa4\x50\x7e\x77\x7b\x22\x60\x89\xef\xcd\xf5\xb9\x0c\x97\x79\x0d\x2b\x35\x43\xcb\x3d\x24\xf1\x78\xfc\xf8\xcb\x1f\x15\x06\xe2\x78\xd8\x51\x21\xd9\x1f\xf0\xf5\x8f\x86\xa4\x50\xfa\xb1\x47\x43\xa5\xdd\x69\x14\xe8\xa3\xc0\x86\x91\xa7\x81\x50\xb4\x7c\xc0\x81\x80\x77\x7a\x9f\xc6\xc2\xa9\x8c\x05\x33\xb0\x3b\x31\xa4\xf4\xd7\x1b\x26\x55\x97\x7c\x65\xf8\x69\x1a\x84\x8e\x41\x78\xd9\xec\xc5\x11\x16\x1e\x74\x91\xf5\x56\xf5\x57\x49\x47\x5c\x92\xa9\x1e\x99\x36\xf4\xdb\xb1\x0e\xd3\x78\x02\xb0\x9b\x25\xcb\xe9\xe9\x1d\x0d\x44\x01\x42\x08\x91\x64\xd9\xdd\x37\x08\x17\xef\xf9\xe5\x0f\xbd\x46\x91\xf5\xf9\x89\x92\x37\xdd\x89\x59\x44\x1f\x9c\xee\x34\x1e\xbe\x47\x83\x32\x72\x8e\x37\xdf\xac\x69\x38\xef\x75\xb0\xda\xdb\xac\x83\x94\x2f\x39\xa6\x62\x05\x1c\x25\x9c\x49\x16\xb0\xa8\x3c\xc7\x7e\x76\x71\x3e\x6f\xb5\x24\xe7\xe8\xb7\xb9\xc7\x6c\x43\x92\xee\x21\xd4\x17\xa1\x7f\xba\x35\xfe\xae\x39\xbc\xde\xba\x69\xd9\x8e\xe1\x62\xde\x64\x7d\x16\x88\x1b\xed\x29\x11\xfd\x4f\xa9\xff\x99\x90\xc4\xf6\xf4\xf9\x6e\xe9\x28\x23\xd7\xca\xe5\xee\xee\x9f\x63\xb1\x5b\xfb\x10\xd7\x2f\x1d\xf2\xe3\xbf\xb9\xb5\x6f\xa4\x6d\x7d\x25\x79\xfb\x24\x31\xea\x56\xbe\x5d\x53\xcd\x2d\x36\xa3\x6d\xdf\xab\x1c\xb8\x6d\x6f\xc0\x98\xa7\xdd\xaa\x86\x8c\x1d\x39\xa3\x9d\x70\x2b\x9b\x68\xd9\xfd\x33\xfe\xa9\xb6\x4a\x2e\x63\x0f\xcf\x68\x27\xd9\x4c\xb9\x46\x6d\xcb\xbe\xa1\xa8\xd6\x5f\xc6\xd6\x9f\xf1\x4f\xf4\xd4\xb4\x78\xd0\xd6\xf4\x13\x3c\x3b\xac\xd0\xdc\x90\x34\xda\xc9\xb4\x9a\x1a\x8d\xbd\x93\x87\xd4\xe2\x21\x1b\xb3\x2b\xd1\xbe\xe7\x69\xd4\x53\x67\xd5\x40\xa0\xe3\x19\x3f\x6d\x1a\xb
c\x0e\x86\x3c\x10\xb4\x3d\x2a\xcd\x78\x32\xe6\xab\xbd\x36\xc9\xf4\x3a\x58\xae\xc3\xf4\x47\xea\xbf\xfb\x47\xff\x0d\x00\x00\xff\xff\xd2\x32\x5a\x28\x38\x9d\x00\x00")
-
-func v2SchemaJsonBytes() ([]byte, error) {
- return bindataRead(
- _v2SchemaJson,
- "v2/schema.json",
- )
-}
-
-func v2SchemaJson() (*asset, error) {
- bytes, err := v2SchemaJsonBytes()
- if err != nil {
- return nil, err
- }
-
- info := bindataFileInfo{name: "v2/schema.json", size: 40248, mode: os.FileMode(0640), modTime: time.Unix(1568964748, 0)}
- a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xab, 0x88, 0x5e, 0xf, 0xbf, 0x17, 0x74, 0x0, 0xb2, 0x5a, 0x7f, 0xbc, 0x58, 0xcd, 0xc, 0x25, 0x73, 0xd5, 0x29, 0x1c, 0x7a, 0xd0, 0xce, 0x79, 0xd4, 0x89, 0x31, 0x27, 0x90, 0xf2, 0xff, 0xe6}}
- return a, nil
-}
-
-// Asset loads and returns the asset for the given name.
-// It returns an error if the asset could not be found or
-// could not be loaded.
-func Asset(name string) ([]byte, error) {
- canonicalName := strings.Replace(name, "\\", "/", -1)
- if f, ok := _bindata[canonicalName]; ok {
- a, err := f()
- if err != nil {
- return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
- }
- return a.bytes, nil
- }
- return nil, fmt.Errorf("Asset %s not found", name)
-}
-
-// AssetString returns the asset contents as a string (instead of a []byte).
-func AssetString(name string) (string, error) {
- data, err := Asset(name)
- return string(data), err
-}
-
-// MustAsset is like Asset but panics when Asset would return an error.
-// It simplifies safe initialization of global variables.
-func MustAsset(name string) []byte {
- a, err := Asset(name)
- if err != nil {
- panic("asset: Asset(" + name + "): " + err.Error())
- }
-
- return a
-}
-
-// MustAssetString is like AssetString but panics when Asset would return an
-// error. It simplifies safe initialization of global variables.
-func MustAssetString(name string) string {
- return string(MustAsset(name))
-}
-
-// AssetInfo loads and returns the asset info for the given name.
-// It returns an error if the asset could not be found or
-// could not be loaded.
-func AssetInfo(name string) (os.FileInfo, error) {
- canonicalName := strings.Replace(name, "\\", "/", -1)
- if f, ok := _bindata[canonicalName]; ok {
- a, err := f()
- if err != nil {
- return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
- }
- return a.info, nil
- }
- return nil, fmt.Errorf("AssetInfo %s not found", name)
-}
-
-// AssetDigest returns the digest of the file with the given name. It returns an
-// error if the asset could not be found or the digest could not be loaded.
-func AssetDigest(name string) ([sha256.Size]byte, error) {
- canonicalName := strings.Replace(name, "\\", "/", -1)
- if f, ok := _bindata[canonicalName]; ok {
- a, err := f()
- if err != nil {
- return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
- }
- return a.digest, nil
- }
- return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
-}
-
-// Digests returns a map of all known files and their checksums.
-func Digests() (map[string][sha256.Size]byte, error) {
- mp := make(map[string][sha256.Size]byte, len(_bindata))
- for name := range _bindata {
- a, err := _bindata[name]()
- if err != nil {
- return nil, err
- }
- mp[name] = a.digest
- }
- return mp, nil
-}
-
-// AssetNames returns the names of the assets.
-func AssetNames() []string {
- names := make([]string, 0, len(_bindata))
- for name := range _bindata {
- names = append(names, name)
- }
- return names
-}
-
-// _bindata is a table, holding each asset generator, mapped to its name.
-var _bindata = map[string]func() (*asset, error){
- "jsonschema-draft-04.json": jsonschemaDraft04Json,
-
- "v2/schema.json": v2SchemaJson,
-}
-
-// AssetDir returns the file names below a certain
-// directory embedded in the file by go-bindata.
-// For example if you run go-bindata on data/... and data contains the
-// following hierarchy:
-// data/
-// foo.txt
-// img/
-// a.png
-// b.png
-// then AssetDir("data") would return []string{"foo.txt", "img"},
-// AssetDir("data/img") would return []string{"a.png", "b.png"},
-// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
-// AssetDir("") will return []string{"data"}.
-func AssetDir(name string) ([]string, error) {
- node := _bintree
- if len(name) != 0 {
- canonicalName := strings.Replace(name, "\\", "/", -1)
- pathList := strings.Split(canonicalName, "/")
- for _, p := range pathList {
- node = node.Children[p]
- if node == nil {
- return nil, fmt.Errorf("Asset %s not found", name)
- }
- }
- }
- if node.Func != nil {
- return nil, fmt.Errorf("Asset %s not found", name)
- }
- rv := make([]string, 0, len(node.Children))
- for childName := range node.Children {
- rv = append(rv, childName)
- }
- return rv, nil
-}
-
-type bintree struct {
- Func func() (*asset, error)
- Children map[string]*bintree
-}
-
-var _bintree = &bintree{nil, map[string]*bintree{
- "jsonschema-draft-04.json": {jsonschemaDraft04Json, map[string]*bintree{}},
- "v2": {nil, map[string]*bintree{
- "schema.json": {v2SchemaJson, map[string]*bintree{}},
- }},
-}}
-
-// RestoreAsset restores an asset under the given directory.
-func RestoreAsset(dir, name string) error {
- data, err := Asset(name)
- if err != nil {
- return err
- }
- info, err := AssetInfo(name)
- if err != nil {
- return err
- }
- err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
- if err != nil {
- return err
- }
- err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
- if err != nil {
- return err
- }
- return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
-}
-
-// RestoreAssets restores an asset under the given directory recursively.
-func RestoreAssets(dir, name string) error {
- children, err := AssetDir(name)
- // File
- if err != nil {
- return RestoreAsset(dir, name)
- }
- // Dir
- for _, child := range children {
- err = RestoreAssets(dir, filepath.Join(name, child))
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func _filePath(dir, name string) string {
- canonicalName := strings.Replace(name, "\\", "/", -1)
- return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
-}
diff --git a/test/tools/vendor/github.com/go-openapi/spec/embed.go b/test/tools/vendor/github.com/go-openapi/spec/embed.go
new file mode 100644
index 00000000000..1f4284750ab
--- /dev/null
+++ b/test/tools/vendor/github.com/go-openapi/spec/embed.go
@@ -0,0 +1,17 @@
+package spec
+
+import (
+ "embed"
+ "path"
+)
+
+//go:embed schemas/*.json schemas/*/*.json
+var assets embed.FS
+
+func jsonschemaDraft04JSONBytes() ([]byte, error) {
+ return assets.ReadFile(path.Join("schemas", "jsonschema-draft-04.json"))
+}
+
+func v2SchemaJSONBytes() ([]byte, error) {
+ return assets.ReadFile(path.Join("schemas", "v2", "schema.json"))
+}
diff --git a/test/tools/vendor/github.com/go-openapi/spec/expander.go b/test/tools/vendor/github.com/go-openapi/spec/expander.go
index d4ea889d44d..b81a5699a03 100644
--- a/test/tools/vendor/github.com/go-openapi/spec/expander.go
+++ b/test/tools/vendor/github.com/go-openapi/spec/expander.go
@@ -27,7 +27,6 @@ import (
// all relative $ref's will be resolved from there.
//
// PathLoader injects a document loading method. By default, this resolves to the function provided by the SpecLoader package variable.
-//
type ExpandOptions struct {
RelativeBase string // the path to the root document to expand. This is a file, not a directory
SkipSchemas bool // do not expand schemas, just paths, parameters and responses
@@ -58,7 +57,7 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
if !options.SkipSchemas {
for key, definition := range spec.Definitions {
parentRefs := make([]string, 0, 10)
- parentRefs = append(parentRefs, fmt.Sprintf("#/definitions/%s", key))
+ parentRefs = append(parentRefs, "#/definitions/"+key)
def, err := expandSchema(definition, parentRefs, resolver, specBasePath)
if resolver.shouldStopOnError(err) {
@@ -103,15 +102,21 @@ const rootBase = ".root"
// baseForRoot loads in the cache the root document and produces a fake ".root" base path entry
// for further $ref resolution
-//
-// Setting the cache is optional and this parameter may safely be left to nil.
func baseForRoot(root interface{}, cache ResolutionCache) string {
+ // cache the root document to resolve $ref's
+ normalizedBase := normalizeBase(rootBase)
+
if root == nil {
- return ""
+ // ensure that we never leave a nil root: always cache the root base pseudo-document
+ cachedRoot, found := cache.Get(normalizedBase)
+ if found && cachedRoot != nil {
+ // the cache is already preloaded with a root
+ return normalizedBase
+ }
+
+ root = map[string]interface{}{}
}
- // cache the root document to resolve $ref's
- normalizedBase := normalizeBase(rootBase)
cache.Set(normalizedBase, root)
return normalizedBase
@@ -208,7 +213,19 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba
}
if target.Ref.String() != "" {
- return expandSchemaRef(target, parentRefs, resolver, basePath)
+ if !resolver.options.SkipSchemas {
+ return expandSchemaRef(target, parentRefs, resolver, basePath)
+ }
+
+ // when "expand" with SkipSchema, we just rebase the existing $ref without replacing
+ // the full schema.
+ rebasedRef, err := NewRef(normalizeURI(target.Ref.String(), basePath))
+ if err != nil {
+ return nil, err
+ }
+ target.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID)
+
+ return &target, nil
}
for k := range target.Definitions {
@@ -520,21 +537,25 @@ func getRefAndSchema(input interface{}) (*Ref, *Schema, error) {
}
func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePath string) error {
- ref, _, err := getRefAndSchema(input)
+ ref, sch, err := getRefAndSchema(input)
if err != nil {
return err
}
- if ref == nil {
+ if ref == nil && sch == nil { // nothing to do
return nil
}
parentRefs := make([]string, 0, 10)
- if err = resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) {
- return err
+ if ref != nil {
+ // dereference this $ref
+ if err = resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) {
+ return err
+ }
+
+ ref, sch, _ = getRefAndSchema(input)
}
- ref, sch, _ := getRefAndSchema(input)
if ref.String() != "" {
transitiveResolver := resolver.transitiveResolver(basePath, *ref)
basePath = resolver.updateBasePath(transitiveResolver, basePath)
@@ -546,6 +567,7 @@ func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePa
if ref != nil {
*ref = Ref{}
}
+
return nil
}
@@ -555,38 +577,29 @@ func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePa
return ern
}
- switch {
- case resolver.isCircular(&rebasedRef, basePath, parentRefs...):
+ if resolver.isCircular(&rebasedRef, basePath, parentRefs...) {
// this is a circular $ref: stop expansion
if !resolver.options.AbsoluteCircularRef {
sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID)
} else {
sch.Ref = rebasedRef
}
- case !resolver.options.SkipSchemas:
- // schema expanded to a $ref in another root
- sch.Ref = rebasedRef
- debugLog("rebased to: %s", sch.Ref.String())
- default:
- // skip schema expansion but rebase $ref to schema
- sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID)
}
}
+ // $ref expansion or rebasing is performed by expandSchema below
if ref != nil {
*ref = Ref{}
}
// expand schema
- if !resolver.options.SkipSchemas {
- s, err := expandSchema(*sch, parentRefs, resolver, basePath)
- if resolver.shouldStopOnError(err) {
- return err
- }
- if s == nil {
- // guard for when continuing on error
- return nil
- }
+ // yes, we do it even if options.SkipSchema is true: we have to go down that rabbit hole and rebase nested $ref)
+ s, err := expandSchema(*sch, parentRefs, resolver, basePath)
+ if resolver.shouldStopOnError(err) {
+ return err
+ }
+
+ if s != nil { // guard for when continuing on error
*sch = *s
}
diff --git a/test/tools/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go b/test/tools/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go
index 2df0723154f..f19f1a8fb65 100644
--- a/test/tools/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go
+++ b/test/tools/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go
@@ -40,5 +40,5 @@ func repairURI(in string) (*url.URL, string) {
return u, ""
}
-func fixWindowsURI(u *url.URL, in string) {
+func fixWindowsURI(_ *url.URL, _ string) {
}
diff --git a/test/tools/vendor/github.com/go-openapi/spec/operation.go b/test/tools/vendor/github.com/go-openapi/spec/operation.go
index 995ce6acb17..a69cca88147 100644
--- a/test/tools/vendor/github.com/go-openapi/spec/operation.go
+++ b/test/tools/vendor/github.com/go-openapi/spec/operation.go
@@ -217,9 +217,12 @@ func (o *Operation) AddParam(param *Parameter) *Operation {
for i, p := range o.Parameters {
if p.Name == param.Name && p.In == param.In {
- params := append(o.Parameters[:i], *param)
+ params := make([]Parameter, 0, len(o.Parameters)+1)
+ params = append(params, o.Parameters[:i]...)
+ params = append(params, *param)
params = append(params, o.Parameters[i+1:]...)
o.Parameters = params
+
return o
}
}
diff --git a/test/tools/vendor/github.com/go-openapi/spec/parameter.go b/test/tools/vendor/github.com/go-openapi/spec/parameter.go
index 2b2b89b67bf..bd4f1cdb076 100644
--- a/test/tools/vendor/github.com/go-openapi/spec/parameter.go
+++ b/test/tools/vendor/github.com/go-openapi/spec/parameter.go
@@ -84,27 +84,27 @@ type ParamProps struct {
// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn).
//
// There are five possible parameter types.
-// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part
-// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`,
-// the path parameter is `itemId`.
-// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`.
-// * Header - Custom headers that are expected as part of the request.
-// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be
-// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for
-// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist
-// together for the same operation.
-// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or
-// `multipart/form-data` are used as the content type of the request (in Swagger's definition,
-// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used
-// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be
-// declared together with a body parameter for the same operation. Form parameters have a different format based on
-// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4).
-// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload.
-// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple
-// parameters that are being transferred.
-// * `multipart/form-data` - each parameter takes a section in the payload with an internal header.
-// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is
-// `submit-name`. This type of form parameters is more commonly used for file transfers.
+// - Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part
+// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`,
+// the path parameter is `itemId`.
+// - Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`.
+// - Header - Custom headers that are expected as part of the request.
+// - Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be
+// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for
+// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist
+// together for the same operation.
+// - Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or
+// `multipart/form-data` are used as the content type of the request (in Swagger's definition,
+// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used
+// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be
+// declared together with a body parameter for the same operation. Form parameters have a different format based on
+// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4).
+// - `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload.
+// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple
+// parameters that are being transferred.
+// - `multipart/form-data` - each parameter takes a section in the payload with an internal header.
+// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is
+// `submit-name`. This type of form parameters is more commonly used for file transfers.
//
// For more information: http://goo.gl/8us55a#parameterObject
type Parameter struct {
diff --git a/test/tools/vendor/github.com/go-openapi/spec/schema_loader.go b/test/tools/vendor/github.com/go-openapi/spec/schema_loader.go
index b81175afdf4..0059b99aed5 100644
--- a/test/tools/vendor/github.com/go-openapi/spec/schema_loader.go
+++ b/test/tools/vendor/github.com/go-openapi/spec/schema_loader.go
@@ -168,14 +168,7 @@ func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error)
normalized := normalizeBase(pth)
debugLog("loading doc from: %s", normalized)
- unescaped, err := url.PathUnescape(normalized)
- if err != nil {
- return nil, url.URL{}, false, err
- }
-
- u := url.URL{Path: unescaped}
-
- data, fromCache := r.cache.Get(u.RequestURI())
+ data, fromCache := r.cache.Get(normalized)
if fromCache {
return data, toFetch, fromCache, nil
}
diff --git a/test/tools/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json b/test/tools/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json
new file mode 100644
index 00000000000..bcbb84743e3
--- /dev/null
+++ b/test/tools/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json
@@ -0,0 +1,149 @@
+{
+ "id": "http://json-schema.org/draft-04/schema#",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "description": "Core schema meta-schema",
+ "definitions": {
+ "schemaArray": {
+ "type": "array",
+ "minItems": 1,
+ "items": { "$ref": "#" }
+ },
+ "positiveInteger": {
+ "type": "integer",
+ "minimum": 0
+ },
+ "positiveIntegerDefault0": {
+ "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ]
+ },
+ "simpleTypes": {
+ "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ]
+ },
+ "stringArray": {
+ "type": "array",
+ "items": { "type": "string" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ },
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string"
+ },
+ "$schema": {
+ "type": "string"
+ },
+ "title": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "default": {},
+ "multipleOf": {
+ "type": "number",
+ "minimum": 0,
+ "exclusiveMinimum": true
+ },
+ "maximum": {
+ "type": "number"
+ },
+ "exclusiveMaximum": {
+ "type": "boolean",
+ "default": false
+ },
+ "minimum": {
+ "type": "number"
+ },
+ "exclusiveMinimum": {
+ "type": "boolean",
+ "default": false
+ },
+ "maxLength": { "$ref": "#/definitions/positiveInteger" },
+ "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" },
+ "pattern": {
+ "type": "string",
+ "format": "regex"
+ },
+ "additionalItems": {
+ "anyOf": [
+ { "type": "boolean" },
+ { "$ref": "#" }
+ ],
+ "default": {}
+ },
+ "items": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/schemaArray" }
+ ],
+ "default": {}
+ },
+ "maxItems": { "$ref": "#/definitions/positiveInteger" },
+ "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" },
+ "uniqueItems": {
+ "type": "boolean",
+ "default": false
+ },
+ "maxProperties": { "$ref": "#/definitions/positiveInteger" },
+ "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" },
+ "required": { "$ref": "#/definitions/stringArray" },
+ "additionalProperties": {
+ "anyOf": [
+ { "type": "boolean" },
+ { "$ref": "#" }
+ ],
+ "default": {}
+ },
+ "definitions": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "properties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "patternProperties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "dependencies": {
+ "type": "object",
+ "additionalProperties": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/stringArray" }
+ ]
+ }
+ },
+ "enum": {
+ "type": "array",
+ "minItems": 1,
+ "uniqueItems": true
+ },
+ "type": {
+ "anyOf": [
+ { "$ref": "#/definitions/simpleTypes" },
+ {
+ "type": "array",
+ "items": { "$ref": "#/definitions/simpleTypes" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ ]
+ },
+ "format": { "type": "string" },
+ "allOf": { "$ref": "#/definitions/schemaArray" },
+ "anyOf": { "$ref": "#/definitions/schemaArray" },
+ "oneOf": { "$ref": "#/definitions/schemaArray" },
+ "not": { "$ref": "#" }
+ },
+ "dependencies": {
+ "exclusiveMaximum": [ "maximum" ],
+ "exclusiveMinimum": [ "minimum" ]
+ },
+ "default": {}
+}
diff --git a/test/tools/vendor/github.com/go-openapi/spec/schemas/v2/schema.json b/test/tools/vendor/github.com/go-openapi/spec/schemas/v2/schema.json
new file mode 100644
index 00000000000..ebe10ed32d6
--- /dev/null
+++ b/test/tools/vendor/github.com/go-openapi/spec/schemas/v2/schema.json
@@ -0,0 +1,1607 @@
+{
+ "title": "A JSON Schema for Swagger 2.0 API.",
+ "id": "http://swagger.io/v2/schema.json#",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type": "object",
+ "required": [
+ "swagger",
+ "info",
+ "paths"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "swagger": {
+ "type": "string",
+ "enum": [
+ "2.0"
+ ],
+ "description": "The Swagger version of this document."
+ },
+ "info": {
+ "$ref": "#/definitions/info"
+ },
+ "host": {
+ "type": "string",
+ "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$",
+ "description": "The host (name or ip) of the API. Example: 'swagger.io'"
+ },
+ "basePath": {
+ "type": "string",
+ "pattern": "^/",
+ "description": "The base path to the API. Example: '/api'."
+ },
+ "schemes": {
+ "$ref": "#/definitions/schemesList"
+ },
+ "consumes": {
+ "description": "A list of MIME types accepted by the API.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/mediaTypeList"
+ }
+ ]
+ },
+ "produces": {
+ "description": "A list of MIME types the API can produce.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/mediaTypeList"
+ }
+ ]
+ },
+ "paths": {
+ "$ref": "#/definitions/paths"
+ },
+ "definitions": {
+ "$ref": "#/definitions/definitions"
+ },
+ "parameters": {
+ "$ref": "#/definitions/parameterDefinitions"
+ },
+ "responses": {
+ "$ref": "#/definitions/responseDefinitions"
+ },
+ "security": {
+ "$ref": "#/definitions/security"
+ },
+ "securityDefinitions": {
+ "$ref": "#/definitions/securityDefinitions"
+ },
+ "tags": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/tag"
+ },
+ "uniqueItems": true
+ },
+ "externalDocs": {
+ "$ref": "#/definitions/externalDocs"
+ }
+ },
+ "definitions": {
+ "info": {
+ "type": "object",
+ "description": "General information about the API.",
+ "required": [
+ "version",
+ "title"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "title": {
+ "type": "string",
+ "description": "A unique and precise title of the API."
+ },
+ "version": {
+ "type": "string",
+ "description": "A semantic version number of the API."
+ },
+ "description": {
+ "type": "string",
+ "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed."
+ },
+ "termsOfService": {
+ "type": "string",
+ "description": "The terms of service for the API."
+ },
+ "contact": {
+ "$ref": "#/definitions/contact"
+ },
+ "license": {
+ "$ref": "#/definitions/license"
+ }
+ }
+ },
+ "contact": {
+ "type": "object",
+ "description": "Contact information for the owners of the API.",
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The identifying name of the contact person/organization."
+ },
+ "url": {
+ "type": "string",
+ "description": "The URL pointing to the contact information.",
+ "format": "uri"
+ },
+ "email": {
+ "type": "string",
+ "description": "The email address of the contact person/organization.",
+ "format": "email"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "license": {
+ "type": "object",
+ "required": [
+ "name"
+ ],
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The name of the license type. It's encouraged to use an OSI compatible license."
+ },
+ "url": {
+ "type": "string",
+ "description": "The URL pointing to the license.",
+ "format": "uri"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "paths": {
+ "type": "object",
+ "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.",
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ },
+ "^/": {
+ "$ref": "#/definitions/pathItem"
+ }
+ },
+ "additionalProperties": false
+ },
+ "definitions": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/schema"
+ },
+ "description": "One or more JSON objects describing the schemas being consumed and produced by the API."
+ },
+ "parameterDefinitions": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/parameter"
+ },
+ "description": "One or more JSON representations for parameters"
+ },
+ "responseDefinitions": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/response"
+ },
+ "description": "One or more JSON representations for responses"
+ },
+ "externalDocs": {
+ "type": "object",
+ "additionalProperties": false,
+ "description": "information about external documentation",
+ "required": [
+ "url"
+ ],
+ "properties": {
+ "description": {
+ "type": "string"
+ },
+ "url": {
+ "type": "string",
+ "format": "uri"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "examples": {
+ "type": "object",
+ "additionalProperties": true
+ },
+ "mimeType": {
+ "type": "string",
+ "description": "The MIME type of the HTTP message."
+ },
+ "operation": {
+ "type": "object",
+ "required": [
+ "responses"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "tags": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "summary": {
+ "type": "string",
+ "description": "A brief summary of the operation."
+ },
+ "description": {
+ "type": "string",
+ "description": "A longer description of the operation, GitHub Flavored Markdown is allowed."
+ },
+ "externalDocs": {
+ "$ref": "#/definitions/externalDocs"
+ },
+ "operationId": {
+ "type": "string",
+ "description": "A unique identifier of the operation."
+ },
+ "produces": {
+ "description": "A list of MIME types the API can produce.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/mediaTypeList"
+ }
+ ]
+ },
+ "consumes": {
+ "description": "A list of MIME types the API can consume.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/mediaTypeList"
+ }
+ ]
+ },
+ "parameters": {
+ "$ref": "#/definitions/parametersList"
+ },
+ "responses": {
+ "$ref": "#/definitions/responses"
+ },
+ "schemes": {
+ "$ref": "#/definitions/schemesList"
+ },
+ "deprecated": {
+ "type": "boolean",
+ "default": false
+ },
+ "security": {
+ "$ref": "#/definitions/security"
+ }
+ }
+ },
+ "pathItem": {
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "$ref": {
+ "type": "string"
+ },
+ "get": {
+ "$ref": "#/definitions/operation"
+ },
+ "put": {
+ "$ref": "#/definitions/operation"
+ },
+ "post": {
+ "$ref": "#/definitions/operation"
+ },
+ "delete": {
+ "$ref": "#/definitions/operation"
+ },
+ "options": {
+ "$ref": "#/definitions/operation"
+ },
+ "head": {
+ "$ref": "#/definitions/operation"
+ },
+ "patch": {
+ "$ref": "#/definitions/operation"
+ },
+ "parameters": {
+ "$ref": "#/definitions/parametersList"
+ }
+ }
+ },
+ "responses": {
+ "type": "object",
+ "description": "Response objects names can either be any valid HTTP status code or 'default'.",
+ "minProperties": 1,
+ "additionalProperties": false,
+ "patternProperties": {
+ "^([0-9]{3})$|^(default)$": {
+ "$ref": "#/definitions/responseValue"
+ },
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "not": {
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ }
+ },
+ "responseValue": {
+ "oneOf": [
+ {
+ "$ref": "#/definitions/response"
+ },
+ {
+ "$ref": "#/definitions/jsonReference"
+ }
+ ]
+ },
+ "response": {
+ "type": "object",
+ "required": [
+ "description"
+ ],
+ "properties": {
+ "description": {
+ "type": "string"
+ },
+ "schema": {
+ "oneOf": [
+ {
+ "$ref": "#/definitions/schema"
+ },
+ {
+ "$ref": "#/definitions/fileSchema"
+ }
+ ]
+ },
+ "headers": {
+ "$ref": "#/definitions/headers"
+ },
+ "examples": {
+ "$ref": "#/definitions/examples"
+ }
+ },
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "headers": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/header"
+ }
+ },
+ "header": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "string",
+ "number",
+ "integer",
+ "boolean",
+ "array"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "items": {
+ "$ref": "#/definitions/primitivesItems"
+ },
+ "collectionFormat": {
+ "$ref": "#/definitions/collectionFormat"
+ },
+ "default": {
+ "$ref": "#/definitions/default"
+ },
+ "maximum": {
+ "$ref": "#/definitions/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "#/definitions/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "#/definitions/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "#/definitions/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/maxLength"
+ },
+ "minLength": {
+ "$ref": "#/definitions/minLength"
+ },
+ "pattern": {
+ "$ref": "#/definitions/pattern"
+ },
+ "maxItems": {
+ "$ref": "#/definitions/maxItems"
+ },
+ "minItems": {
+ "$ref": "#/definitions/minItems"
+ },
+ "uniqueItems": {
+ "$ref": "#/definitions/uniqueItems"
+ },
+ "enum": {
+ "$ref": "#/definitions/enum"
+ },
+ "multipleOf": {
+ "$ref": "#/definitions/multipleOf"
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "vendorExtension": {
+ "description": "Any property starting with x- is valid.",
+ "additionalProperties": true,
+ "additionalItems": true
+ },
+ "bodyParameter": {
+ "type": "object",
+ "required": [
+ "name",
+ "in",
+ "schema"
+ ],
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "description": {
+ "type": "string",
+ "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the parameter."
+ },
+ "in": {
+ "type": "string",
+ "description": "Determines the location of the parameter.",
+ "enum": [
+ "body"
+ ]
+ },
+ "required": {
+ "type": "boolean",
+ "description": "Determines whether or not this parameter is required or optional.",
+ "default": false
+ },
+ "schema": {
+ "$ref": "#/definitions/schema"
+ }
+ },
+ "additionalProperties": false
+ },
+ "headerParameterSubSchema": {
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "required": {
+ "type": "boolean",
+ "description": "Determines whether or not this parameter is required or optional.",
+ "default": false
+ },
+ "in": {
+ "type": "string",
+ "description": "Determines the location of the parameter.",
+ "enum": [
+ "header"
+ ]
+ },
+ "description": {
+ "type": "string",
+ "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the parameter."
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "string",
+ "number",
+ "boolean",
+ "integer",
+ "array"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "items": {
+ "$ref": "#/definitions/primitivesItems"
+ },
+ "collectionFormat": {
+ "$ref": "#/definitions/collectionFormat"
+ },
+ "default": {
+ "$ref": "#/definitions/default"
+ },
+ "maximum": {
+ "$ref": "#/definitions/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "#/definitions/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "#/definitions/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "#/definitions/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/maxLength"
+ },
+ "minLength": {
+ "$ref": "#/definitions/minLength"
+ },
+ "pattern": {
+ "$ref": "#/definitions/pattern"
+ },
+ "maxItems": {
+ "$ref": "#/definitions/maxItems"
+ },
+ "minItems": {
+ "$ref": "#/definitions/minItems"
+ },
+ "uniqueItems": {
+ "$ref": "#/definitions/uniqueItems"
+ },
+ "enum": {
+ "$ref": "#/definitions/enum"
+ },
+ "multipleOf": {
+ "$ref": "#/definitions/multipleOf"
+ }
+ }
+ },
+ "queryParameterSubSchema": {
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "required": {
+ "type": "boolean",
+ "description": "Determines whether or not this parameter is required or optional.",
+ "default": false
+ },
+ "in": {
+ "type": "string",
+ "description": "Determines the location of the parameter.",
+ "enum": [
+ "query"
+ ]
+ },
+ "description": {
+ "type": "string",
+ "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the parameter."
+ },
+ "allowEmptyValue": {
+ "type": "boolean",
+ "default": false,
+ "description": "allows sending a parameter by name only or with an empty value."
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "string",
+ "number",
+ "boolean",
+ "integer",
+ "array"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "items": {
+ "$ref": "#/definitions/primitivesItems"
+ },
+ "collectionFormat": {
+ "$ref": "#/definitions/collectionFormatWithMulti"
+ },
+ "default": {
+ "$ref": "#/definitions/default"
+ },
+ "maximum": {
+ "$ref": "#/definitions/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "#/definitions/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "#/definitions/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "#/definitions/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/maxLength"
+ },
+ "minLength": {
+ "$ref": "#/definitions/minLength"
+ },
+ "pattern": {
+ "$ref": "#/definitions/pattern"
+ },
+ "maxItems": {
+ "$ref": "#/definitions/maxItems"
+ },
+ "minItems": {
+ "$ref": "#/definitions/minItems"
+ },
+ "uniqueItems": {
+ "$ref": "#/definitions/uniqueItems"
+ },
+ "enum": {
+ "$ref": "#/definitions/enum"
+ },
+ "multipleOf": {
+ "$ref": "#/definitions/multipleOf"
+ }
+ }
+ },
+ "formDataParameterSubSchema": {
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "required": {
+ "type": "boolean",
+ "description": "Determines whether or not this parameter is required or optional.",
+ "default": false
+ },
+ "in": {
+ "type": "string",
+ "description": "Determines the location of the parameter.",
+ "enum": [
+ "formData"
+ ]
+ },
+ "description": {
+ "type": "string",
+ "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the parameter."
+ },
+ "allowEmptyValue": {
+ "type": "boolean",
+ "default": false,
+ "description": "allows sending a parameter by name only or with an empty value."
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "string",
+ "number",
+ "boolean",
+ "integer",
+ "array",
+ "file"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "items": {
+ "$ref": "#/definitions/primitivesItems"
+ },
+ "collectionFormat": {
+ "$ref": "#/definitions/collectionFormatWithMulti"
+ },
+ "default": {
+ "$ref": "#/definitions/default"
+ },
+ "maximum": {
+ "$ref": "#/definitions/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "#/definitions/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "#/definitions/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "#/definitions/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/maxLength"
+ },
+ "minLength": {
+ "$ref": "#/definitions/minLength"
+ },
+ "pattern": {
+ "$ref": "#/definitions/pattern"
+ },
+ "maxItems": {
+ "$ref": "#/definitions/maxItems"
+ },
+ "minItems": {
+ "$ref": "#/definitions/minItems"
+ },
+ "uniqueItems": {
+ "$ref": "#/definitions/uniqueItems"
+ },
+ "enum": {
+ "$ref": "#/definitions/enum"
+ },
+ "multipleOf": {
+ "$ref": "#/definitions/multipleOf"
+ }
+ }
+ },
+ "pathParameterSubSchema": {
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "required": [
+ "required"
+ ],
+ "properties": {
+ "required": {
+ "type": "boolean",
+ "enum": [
+ true
+ ],
+ "description": "Determines whether or not this parameter is required or optional."
+ },
+ "in": {
+ "type": "string",
+ "description": "Determines the location of the parameter.",
+ "enum": [
+ "path"
+ ]
+ },
+ "description": {
+ "type": "string",
+ "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the parameter."
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "string",
+ "number",
+ "boolean",
+ "integer",
+ "array"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "items": {
+ "$ref": "#/definitions/primitivesItems"
+ },
+ "collectionFormat": {
+ "$ref": "#/definitions/collectionFormat"
+ },
+ "default": {
+ "$ref": "#/definitions/default"
+ },
+ "maximum": {
+ "$ref": "#/definitions/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "#/definitions/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "#/definitions/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "#/definitions/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/maxLength"
+ },
+ "minLength": {
+ "$ref": "#/definitions/minLength"
+ },
+ "pattern": {
+ "$ref": "#/definitions/pattern"
+ },
+ "maxItems": {
+ "$ref": "#/definitions/maxItems"
+ },
+ "minItems": {
+ "$ref": "#/definitions/minItems"
+ },
+ "uniqueItems": {
+ "$ref": "#/definitions/uniqueItems"
+ },
+ "enum": {
+ "$ref": "#/definitions/enum"
+ },
+ "multipleOf": {
+ "$ref": "#/definitions/multipleOf"
+ }
+ }
+ },
+ "nonBodyParameter": {
+ "type": "object",
+ "required": [
+ "name",
+ "in",
+ "type"
+ ],
+ "oneOf": [
+ {
+ "$ref": "#/definitions/headerParameterSubSchema"
+ },
+ {
+ "$ref": "#/definitions/formDataParameterSubSchema"
+ },
+ {
+ "$ref": "#/definitions/queryParameterSubSchema"
+ },
+ {
+ "$ref": "#/definitions/pathParameterSubSchema"
+ }
+ ]
+ },
+ "parameter": {
+ "oneOf": [
+ {
+ "$ref": "#/definitions/bodyParameter"
+ },
+ {
+ "$ref": "#/definitions/nonBodyParameter"
+ }
+ ]
+ },
+ "schema": {
+ "type": "object",
+ "description": "A deterministic version of a JSON Schema object.",
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "$ref": {
+ "type": "string"
+ },
+ "format": {
+ "type": "string"
+ },
+ "title": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/title"
+ },
+ "description": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/description"
+ },
+ "default": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/default"
+ },
+ "multipleOf": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf"
+ },
+ "maximum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+ },
+ "minLength": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+ },
+ "pattern": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern"
+ },
+ "maxItems": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+ },
+ "minItems": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+ },
+ "uniqueItems": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems"
+ },
+ "maxProperties": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+ },
+ "minProperties": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+ },
+ "required": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray"
+ },
+ "enum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/enum"
+ },
+ "additionalProperties": {
+ "anyOf": [
+ {
+ "$ref": "#/definitions/schema"
+ },
+ {
+ "type": "boolean"
+ }
+ ],
+ "default": {}
+ },
+ "type": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/type"
+ },
+ "items": {
+ "anyOf": [
+ {
+ "$ref": "#/definitions/schema"
+ },
+ {
+ "type": "array",
+ "minItems": 1,
+ "items": {
+ "$ref": "#/definitions/schema"
+ }
+ }
+ ],
+ "default": {}
+ },
+ "allOf": {
+ "type": "array",
+ "minItems": 1,
+ "items": {
+ "$ref": "#/definitions/schema"
+ }
+ },
+ "properties": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/schema"
+ },
+ "default": {}
+ },
+ "discriminator": {
+ "type": "string"
+ },
+ "readOnly": {
+ "type": "boolean",
+ "default": false
+ },
+ "xml": {
+ "$ref": "#/definitions/xml"
+ },
+ "externalDocs": {
+ "$ref": "#/definitions/externalDocs"
+ },
+ "example": {}
+ },
+ "additionalProperties": false
+ },
+ "fileSchema": {
+ "type": "object",
+ "description": "A deterministic version of a JSON Schema object.",
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "required": [
+ "type"
+ ],
+ "properties": {
+ "format": {
+ "type": "string"
+ },
+ "title": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/title"
+ },
+ "description": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/description"
+ },
+ "default": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/default"
+ },
+ "required": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray"
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "file"
+ ]
+ },
+ "readOnly": {
+ "type": "boolean",
+ "default": false
+ },
+ "externalDocs": {
+ "$ref": "#/definitions/externalDocs"
+ },
+ "example": {}
+ },
+ "additionalProperties": false
+ },
+ "primitivesItems": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "string",
+ "number",
+ "integer",
+ "boolean",
+ "array"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "items": {
+ "$ref": "#/definitions/primitivesItems"
+ },
+ "collectionFormat": {
+ "$ref": "#/definitions/collectionFormat"
+ },
+ "default": {
+ "$ref": "#/definitions/default"
+ },
+ "maximum": {
+ "$ref": "#/definitions/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "#/definitions/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "#/definitions/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "#/definitions/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/maxLength"
+ },
+ "minLength": {
+ "$ref": "#/definitions/minLength"
+ },
+ "pattern": {
+ "$ref": "#/definitions/pattern"
+ },
+ "maxItems": {
+ "$ref": "#/definitions/maxItems"
+ },
+ "minItems": {
+ "$ref": "#/definitions/minItems"
+ },
+ "uniqueItems": {
+ "$ref": "#/definitions/uniqueItems"
+ },
+ "enum": {
+ "$ref": "#/definitions/enum"
+ },
+ "multipleOf": {
+ "$ref": "#/definitions/multipleOf"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "security": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/securityRequirement"
+ },
+ "uniqueItems": true
+ },
+ "securityRequirement": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true
+ }
+ },
+ "xml": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "namespace": {
+ "type": "string"
+ },
+ "prefix": {
+ "type": "string"
+ },
+ "attribute": {
+ "type": "boolean",
+ "default": false
+ },
+ "wrapped": {
+ "type": "boolean",
+ "default": false
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "tag": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "externalDocs": {
+ "$ref": "#/definitions/externalDocs"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "securityDefinitions": {
+ "type": "object",
+ "additionalProperties": {
+ "oneOf": [
+ {
+ "$ref": "#/definitions/basicAuthenticationSecurity"
+ },
+ {
+ "$ref": "#/definitions/apiKeySecurity"
+ },
+ {
+ "$ref": "#/definitions/oauth2ImplicitSecurity"
+ },
+ {
+ "$ref": "#/definitions/oauth2PasswordSecurity"
+ },
+ {
+ "$ref": "#/definitions/oauth2ApplicationSecurity"
+ },
+ {
+ "$ref": "#/definitions/oauth2AccessCodeSecurity"
+ }
+ ]
+ }
+ },
+ "basicAuthenticationSecurity": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "basic"
+ ]
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "apiKeySecurity": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type",
+ "name",
+ "in"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "apiKey"
+ ]
+ },
+ "name": {
+ "type": "string"
+ },
+ "in": {
+ "type": "string",
+ "enum": [
+ "header",
+ "query"
+ ]
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "oauth2ImplicitSecurity": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type",
+ "flow",
+ "authorizationUrl"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "oauth2"
+ ]
+ },
+ "flow": {
+ "type": "string",
+ "enum": [
+ "implicit"
+ ]
+ },
+ "scopes": {
+ "$ref": "#/definitions/oauth2Scopes"
+ },
+ "authorizationUrl": {
+ "type": "string",
+ "format": "uri"
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "oauth2PasswordSecurity": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type",
+ "flow",
+ "tokenUrl"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "oauth2"
+ ]
+ },
+ "flow": {
+ "type": "string",
+ "enum": [
+ "password"
+ ]
+ },
+ "scopes": {
+ "$ref": "#/definitions/oauth2Scopes"
+ },
+ "tokenUrl": {
+ "type": "string",
+ "format": "uri"
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "oauth2ApplicationSecurity": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type",
+ "flow",
+ "tokenUrl"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "oauth2"
+ ]
+ },
+ "flow": {
+ "type": "string",
+ "enum": [
+ "application"
+ ]
+ },
+ "scopes": {
+ "$ref": "#/definitions/oauth2Scopes"
+ },
+ "tokenUrl": {
+ "type": "string",
+ "format": "uri"
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "oauth2AccessCodeSecurity": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type",
+ "flow",
+ "authorizationUrl",
+ "tokenUrl"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "oauth2"
+ ]
+ },
+ "flow": {
+ "type": "string",
+ "enum": [
+ "accessCode"
+ ]
+ },
+ "scopes": {
+ "$ref": "#/definitions/oauth2Scopes"
+ },
+ "authorizationUrl": {
+ "type": "string",
+ "format": "uri"
+ },
+ "tokenUrl": {
+ "type": "string",
+ "format": "uri"
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "oauth2Scopes": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "mediaTypeList": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/mimeType"
+ },
+ "uniqueItems": true
+ },
+ "parametersList": {
+ "type": "array",
+ "description": "The parameters needed to send a valid API call.",
+ "additionalItems": false,
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/definitions/parameter"
+ },
+ {
+ "$ref": "#/definitions/jsonReference"
+ }
+ ]
+ },
+ "uniqueItems": true
+ },
+ "schemesList": {
+ "type": "array",
+ "description": "The transfer protocol of the API.",
+ "items": {
+ "type": "string",
+ "enum": [
+ "http",
+ "https",
+ "ws",
+ "wss"
+ ]
+ },
+ "uniqueItems": true
+ },
+ "collectionFormat": {
+ "type": "string",
+ "enum": [
+ "csv",
+ "ssv",
+ "tsv",
+ "pipes"
+ ],
+ "default": "csv"
+ },
+ "collectionFormatWithMulti": {
+ "type": "string",
+ "enum": [
+ "csv",
+ "ssv",
+ "tsv",
+ "pipes",
+ "multi"
+ ],
+ "default": "csv"
+ },
+ "title": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/title"
+ },
+ "description": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/description"
+ },
+ "default": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/default"
+ },
+ "multipleOf": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf"
+ },
+ "maximum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+ },
+ "minLength": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+ },
+ "pattern": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern"
+ },
+ "maxItems": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+ },
+ "minItems": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+ },
+ "uniqueItems": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems"
+ },
+ "enum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/enum"
+ },
+ "jsonReference": {
+ "type": "object",
+ "required": [
+ "$ref"
+ ],
+ "additionalProperties": false,
+ "properties": {
+ "$ref": {
+ "type": "string"
+ }
+ }
+ }
+ }
+}
diff --git a/test/tools/vendor/github.com/go-openapi/spec/spec.go b/test/tools/vendor/github.com/go-openapi/spec/spec.go
index 7d38b6e6251..876aa12759d 100644
--- a/test/tools/vendor/github.com/go-openapi/spec/spec.go
+++ b/test/tools/vendor/github.com/go-openapi/spec/spec.go
@@ -26,7 +26,7 @@ import (
const (
// SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs
SwaggerSchemaURL = "http://swagger.io/v2/schema.json#"
- // JSONSchemaURL the url for the json schema schema
+ // JSONSchemaURL the url for the json schema
JSONSchemaURL = "http://json-schema.org/draft-04/schema#"
)
@@ -41,7 +41,7 @@ func MustLoadJSONSchemaDraft04() *Schema {
// JSONSchemaDraft04 loads the json schema document for json shema draft04
func JSONSchemaDraft04() (*Schema, error) {
- b, err := Asset("jsonschema-draft-04.json")
+ b, err := jsonschemaDraft04JSONBytes()
if err != nil {
return nil, err
}
@@ -65,7 +65,7 @@ func MustLoadSwagger20Schema() *Schema {
// Swagger20Schema loads the swagger 2.0 schema from the embedded assets
func Swagger20Schema() (*Schema, error) {
- b, err := Asset("v2/schema.json")
+ b, err := v2SchemaJSONBytes()
if err != nil {
return nil, err
}
diff --git a/test/tools/vendor/github.com/go-openapi/spec/swagger.go b/test/tools/vendor/github.com/go-openapi/spec/swagger.go
index 44722ffd5ad..1590fd1751b 100644
--- a/test/tools/vendor/github.com/go-openapi/spec/swagger.go
+++ b/test/tools/vendor/github.com/go-openapi/spec/swagger.go
@@ -253,7 +253,7 @@ func (s SchemaOrBool) MarshalJSON() ([]byte, error) {
// UnmarshalJSON converts this bool or schema object from a JSON structure
func (s *SchemaOrBool) UnmarshalJSON(data []byte) error {
var nw SchemaOrBool
- if len(data) >= 4 {
+ if len(data) > 0 {
if data[0] == '{' {
var sch Schema
if err := json.Unmarshal(data, &sch); err != nil {
@@ -261,7 +261,7 @@ func (s *SchemaOrBool) UnmarshalJSON(data []byte) error {
}
nw.Schema = &sch
}
- nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e')
+ nw.Allows = !bytes.Equal(data, []byte("false"))
}
*s = nw
return nil
diff --git a/test/tools/vendor/github.com/go-openapi/spec/url_go18.go b/test/tools/vendor/github.com/go-openapi/spec/url_go18.go
deleted file mode 100644
index 60b78515363..00000000000
--- a/test/tools/vendor/github.com/go-openapi/spec/url_go18.go
+++ /dev/null
@@ -1,8 +0,0 @@
-//go:build !go1.19
-// +build !go1.19
-
-package spec
-
-import "net/url"
-
-var parseURL = url.Parse
diff --git a/test/tools/vendor/github.com/go-openapi/spec/url_go19.go b/test/tools/vendor/github.com/go-openapi/spec/url_go19.go
index 392e3e6395b..5bdfe40bcc1 100644
--- a/test/tools/vendor/github.com/go-openapi/spec/url_go19.go
+++ b/test/tools/vendor/github.com/go-openapi/spec/url_go19.go
@@ -1,6 +1,3 @@
-//go:build go1.19
-// +build go1.19
-
package spec
import "net/url"
diff --git a/test/tools/vendor/github.com/go-openapi/strfmt/.golangci.yml b/test/tools/vendor/github.com/go-openapi/strfmt/.golangci.yml
index be4899cb125..22f8d21cca1 100644
--- a/test/tools/vendor/github.com/go-openapi/strfmt/.golangci.yml
+++ b/test/tools/vendor/github.com/go-openapi/strfmt/.golangci.yml
@@ -4,56 +4,58 @@ linters-settings:
golint:
min-confidence: 0
gocyclo:
- min-complexity: 31
+ min-complexity: 45
maligned:
suggest-new: true
dupl:
- threshold: 100
+ threshold: 200
goconst:
min-len: 2
- min-occurrences: 4
+ min-occurrences: 3
linters:
- enable:
- - revive
- - goimports
- - gosec
+ enable-all: true
+ disable:
+ - maligned
- unparam
- - unconvert
- - predeclared
- - prealloc
- - misspell
-
- # disable:
- # - maligned
- # - lll
- # - gochecknoinits
- # - gochecknoglobals
- # - godox
- # - gocognit
- # - whitespace
- # - wsl
- # - funlen
- # - wrapcheck
- # - testpackage
- # - nlreturn
- # - gofumpt
- # - goerr113
- # - gci
- # - gomnd
- # - godot
- # - exhaustivestruct
- # - paralleltest
- # - varnamelen
- # - ireturn
- # - exhaustruct
- # #- thelper
-
-issues:
- exclude-rules:
- - path: bson.go
- text: "should be .*ObjectID"
- linters:
- - golint
- - stylecheck
-
+ - lll
+ - gochecknoinits
+ - gochecknoglobals
+ - funlen
+ - godox
+ - gocognit
+ - whitespace
+ - wsl
+ - wrapcheck
+ - testpackage
+ - nlreturn
+ - gomnd
+ - exhaustivestruct
+ - goerr113
+ - errorlint
+ - nestif
+ - godot
+ - gofumpt
+ - paralleltest
+ - tparallel
+ - thelper
+ - ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/test/tools/vendor/github.com/go-openapi/strfmt/README.md b/test/tools/vendor/github.com/go-openapi/strfmt/README.md
index 0cf89d77661..f6b39c6c56c 100644
--- a/test/tools/vendor/github.com/go-openapi/strfmt/README.md
+++ b/test/tools/vendor/github.com/go-openapi/strfmt/README.md
@@ -1,8 +1,7 @@
-# Strfmt [](https://travis-ci.org/go-openapi/strfmt) [](https://codecov.io/gh/go-openapi/strfmt) [](https://slackin.goswagger.io)
-
+# Strfmt [](https://github.com/go-openapi/strfmt/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/strfmt)
+[](https://slackin.goswagger.io)
[](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE)
[](http://godoc.org/github.com/go-openapi/strfmt)
-[](https://golangci.com)
[](https://goreportcard.com/report/github.com/go-openapi/strfmt)
This package exposes a registry of data types to support string formats in the go-openapi toolkit.
diff --git a/test/tools/vendor/github.com/go-openapi/strfmt/bson.go b/test/tools/vendor/github.com/go-openapi/strfmt/bson.go
index a8a3604a2c3..cfa9a526feb 100644
--- a/test/tools/vendor/github.com/go-openapi/strfmt/bson.go
+++ b/test/tools/vendor/github.com/go-openapi/strfmt/bson.go
@@ -39,10 +39,10 @@ func IsBSONObjectID(str string) bool {
// ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID)
//
// swagger:strfmt bsonobjectid
-type ObjectId bsonprim.ObjectID //nolint:revive
+type ObjectId bsonprim.ObjectID //nolint:revive,stylecheck
// NewObjectId creates a ObjectId from a Hex String
-func NewObjectId(hex string) ObjectId { //nolint:revive
+func NewObjectId(hex string) ObjectId { //nolint:revive,stylecheck
oid, err := bsonprim.ObjectIDFromHex(hex)
if err != nil {
panic(err)
@@ -135,7 +135,7 @@ func (id *ObjectId) UnmarshalBSON(data []byte) error {
// BSON document if the error is nil.
func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) {
oid := bsonprim.ObjectID(id)
- return bsontype.ObjectID, oid[:], nil
+ return bson.TypeObjectID, oid[:], nil
}
// UnmarshalBSONValue is an interface implemented by types that can unmarshal a
diff --git a/test/tools/vendor/github.com/go-openapi/strfmt/default.go b/test/tools/vendor/github.com/go-openapi/strfmt/default.go
index a89a4de3f38..2813714060e 100644
--- a/test/tools/vendor/github.com/go-openapi/strfmt/default.go
+++ b/test/tools/vendor/github.com/go-openapi/strfmt/default.go
@@ -25,6 +25,7 @@ import (
"strings"
"github.com/asaskevich/govalidator"
+ "github.com/google/uuid"
"go.mongodb.org/mongo-driver/bson"
)
@@ -57,24 +58,35 @@ const (
// - long top-level domain names (e.g. example.london) are permitted
// - symbol unicode points are permitted (e.g. emoji) (not for top-level domain)
HostnamePattern = `^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$`
+
+ // json null type
+ jsonNull = "null"
+)
+
+const (
// UUIDPattern Regex for UUID that allows uppercase
- UUIDPattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$`
+ //
+ // Deprecated: strfmt no longer uses regular expressions to validate UUIDs.
+ UUIDPattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$)|(^[0-9a-f]{32}$)`
+
// UUID3Pattern Regex for UUID3 that allows uppercase
- UUID3Pattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$`
+ //
+ // Deprecated: strfmt no longer uses regular expressions to validate UUIDs.
+ UUID3Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$)|(^[0-9a-f]{12}3[0-9a-f]{3}?[0-9a-f]{16}$)`
+
// UUID4Pattern Regex for UUID4 that allows uppercase
- UUID4Pattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$`
+ //
+ // Deprecated: strfmt no longer uses regular expressions to validate UUIDs.
+ UUID4Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$)|(^[0-9a-f]{12}4[0-9a-f]{3}[89ab][0-9a-f]{15}$)`
+
// UUID5Pattern Regex for UUID5 that allows uppercase
- UUID5Pattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$`
- // json null type
- jsonNull = "null"
+ //
+ // Deprecated: strfmt no longer uses regular expressions to validate UUIDs.
+ UUID5Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$)|(^[0-9a-f]{12}5[0-9a-f]{3}[89ab][0-9a-f]{15}$)`
)
var (
rxHostname = regexp.MustCompile(HostnamePattern)
- rxUUID = regexp.MustCompile(UUIDPattern)
- rxUUID3 = regexp.MustCompile(UUID3Pattern)
- rxUUID4 = regexp.MustCompile(UUID4Pattern)
- rxUUID5 = regexp.MustCompile(UUID5Pattern)
)
// IsHostname returns true when the string is a valid hostname
@@ -99,24 +111,28 @@ func IsHostname(str string) bool {
return valid
}
-// IsUUID returns true is the string matches a UUID, upper case is allowed
+// IsUUID returns true is the string matches a UUID (in any version, including v6 and v7), upper case is allowed
func IsUUID(str string) bool {
- return rxUUID.MatchString(str)
+ _, err := uuid.Parse(str)
+ return err == nil
}
-// IsUUID3 returns true is the string matches a UUID, upper case is allowed
+// IsUUID3 returns true is the string matches a UUID v3, upper case is allowed
func IsUUID3(str string) bool {
- return rxUUID3.MatchString(str)
+ id, err := uuid.Parse(str)
+ return err == nil && id.Version() == uuid.Version(3)
}
-// IsUUID4 returns true is the string matches a UUID, upper case is allowed
+// IsUUID4 returns true is the string matches a UUID v4, upper case is allowed
func IsUUID4(str string) bool {
- return rxUUID4.MatchString(str)
+ id, err := uuid.Parse(str)
+ return err == nil && id.Version() == uuid.Version(4)
}
-// IsUUID5 returns true is the string matches a UUID, upper case is allowed
+// IsUUID5 returns true is the string matches a UUID v5, upper case is allowed
func IsUUID5(str string) bool {
- return rxUUID5.MatchString(str)
+ id, err := uuid.Parse(str)
+ return err == nil && id.Version() == uuid.Version(5)
}
// IsEmail validates an email address.
diff --git a/test/tools/vendor/github.com/go-openapi/strfmt/format.go b/test/tools/vendor/github.com/go-openapi/strfmt/format.go
index ad3b3c355ba..888e107c381 100644
--- a/test/tools/vendor/github.com/go-openapi/strfmt/format.go
+++ b/test/tools/vendor/github.com/go-openapi/strfmt/format.go
@@ -16,6 +16,7 @@ package strfmt
import (
"encoding"
+ stderrors "errors"
"fmt"
"reflect"
"strings"
@@ -94,7 +95,7 @@ func NewSeededFormats(seeds []knownFormat, normalizer NameNormalizer) Registry {
}
// MapStructureHookFunc is a decode hook function for mapstructure
-func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { //nolint:gocyclo,cyclop
+func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc {
return func(from reflect.Type, to reflect.Type, obj interface{}) (interface{}, error) {
if from.Kind() != reflect.String {
return obj, nil
@@ -117,7 +118,7 @@ func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { //
case "datetime":
input := data
if len(input) == 0 {
- return nil, fmt.Errorf("empty string is an invalid datetime format")
+ return nil, stderrors.New("empty string is an invalid datetime format")
}
return ParseDateTime(input)
case "duration":
diff --git a/test/tools/vendor/github.com/go-openapi/strfmt/time.go b/test/tools/vendor/github.com/go-openapi/strfmt/time.go
index 9bef4c3b335..f08ba4da5d4 100644
--- a/test/tools/vendor/github.com/go-openapi/strfmt/time.go
+++ b/test/tools/vendor/github.com/go-openapi/strfmt/time.go
@@ -76,6 +76,8 @@ const (
ISO8601TimeWithReducedPrecisionLocaltime = "2006-01-02T15:04"
// ISO8601TimeUniversalSortableDateTimePattern represents a ISO8601 universal sortable date time pattern.
ISO8601TimeUniversalSortableDateTimePattern = "2006-01-02 15:04:05"
+ // short form of ISO8601TimeUniversalSortableDateTimePattern
+ ISO8601TimeUniversalSortableDateTimePatternShortForm = "2006-01-02"
// DateTimePattern pattern to match for the date-time format from http://tools.ietf.org/html/rfc3339#section-5.6
DateTimePattern = `^([0-9]{2}):([0-9]{2}):([0-9]{2})(.[0-9]+)?(z|([+-][0-9]{2}:[0-9]{2}))$`
)
@@ -84,7 +86,7 @@ var (
rxDateTime = regexp.MustCompile(DateTimePattern)
// DateTimeFormats is the collection of formats used by ParseDateTime()
- DateTimeFormats = []string{RFC3339Micro, RFC3339MicroNoColon, RFC3339Millis, RFC3339MillisNoColon, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime, ISO8601TimeUniversalSortableDateTimePattern}
+ DateTimeFormats = []string{RFC3339Micro, RFC3339MicroNoColon, RFC3339Millis, RFC3339MillisNoColon, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime, ISO8601TimeUniversalSortableDateTimePattern, ISO8601TimeUniversalSortableDateTimePatternShortForm}
// MarshalFormat sets the time resolution format used for marshaling time (set to milliseconds)
MarshalFormat = RFC3339Millis
@@ -245,7 +247,7 @@ func (t DateTime) MarshalBSONValue() (bsontype.Type, []byte, error) {
buf := make([]byte, 8)
binary.LittleEndian.PutUint64(buf, uint64(i64))
- return bsontype.DateTime, buf, nil
+ return bson.TypeDateTime, buf, nil
}
// UnmarshalBSONValue is an interface implemented by types that can unmarshal a
@@ -253,7 +255,7 @@ func (t DateTime) MarshalBSONValue() (bsontype.Type, []byte, error) {
// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
// wishes to retain the data after returning.
func (t *DateTime) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error {
- if tpe == bsontype.Null {
+ if tpe == bson.TypeNull {
*t = DateTime{}
return nil
}
diff --git a/test/tools/vendor/github.com/go-openapi/swag/.gitignore b/test/tools/vendor/github.com/go-openapi/swag/.gitignore
index d69b53accc5..c4b1b64f04e 100644
--- a/test/tools/vendor/github.com/go-openapi/swag/.gitignore
+++ b/test/tools/vendor/github.com/go-openapi/swag/.gitignore
@@ -2,3 +2,4 @@ secrets.yml
vendor
Godeps
.idea
+*.out
diff --git a/test/tools/vendor/github.com/go-openapi/swag/.golangci.yml b/test/tools/vendor/github.com/go-openapi/swag/.golangci.yml
index bf503e40001..80e2be0042f 100644
--- a/test/tools/vendor/github.com/go-openapi/swag/.golangci.yml
+++ b/test/tools/vendor/github.com/go-openapi/swag/.golangci.yml
@@ -4,14 +4,14 @@ linters-settings:
golint:
min-confidence: 0
gocyclo:
- min-complexity: 25
+ min-complexity: 45
maligned:
suggest-new: true
dupl:
- threshold: 100
+ threshold: 200
goconst:
min-len: 3
- min-occurrences: 2
+ min-occurrences: 3
linters:
enable-all: true
@@ -20,35 +20,41 @@ linters:
- lll
- gochecknoinits
- gochecknoglobals
- - nlreturn
- - testpackage
+ - funlen
+ - godox
+ - gocognit
+ - whitespace
+ - wsl
- wrapcheck
+ - testpackage
+ - nlreturn
- gomnd
- - exhaustive
- exhaustivestruct
- goerr113
- - wsl
- - whitespace
- - gofumpt
- - godot
+ - errorlint
- nestif
- - godox
- - funlen
- - gci
- - gocognit
+ - godot
+ - gofumpt
- paralleltest
+ - tparallel
- thelper
- ifshort
- - gomoddirectives
- - cyclop
- - forcetypeassert
- - ireturn
- - tagliatelle
- - varnamelen
- - goimports
- - tenv
- - golint
- exhaustruct
- - nilnil
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
- nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
- nosnakecase
diff --git a/test/tools/vendor/github.com/go-openapi/swag/BENCHMARK.md b/test/tools/vendor/github.com/go-openapi/swag/BENCHMARK.md
new file mode 100644
index 00000000000..e7f28ed6b78
--- /dev/null
+++ b/test/tools/vendor/github.com/go-openapi/swag/BENCHMARK.md
@@ -0,0 +1,52 @@
+# Benchmarks
+
+## Name mangling utilities
+
+```bash
+go test -bench XXX -run XXX -benchtime 30s
+```
+
+### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df
+
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/swag
+cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz
+BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op
+BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op
+BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op
+BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op
+BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op
+BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op
+```
+
+### Benchmarks after PR #79
+
+~ x10 performance improvement and ~ /100 memory allocations.
+
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/swag
+cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz
+BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op
+BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op
+BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op
+BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op
+```
+
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/swag
+cpu: AMD Ryzen 7 5800X 8-Core Processor
+BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op
+BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op
+BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op
+BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op
+```
diff --git a/test/tools/vendor/github.com/go-openapi/swag/README.md b/test/tools/vendor/github.com/go-openapi/swag/README.md
index 217f6fa5054..a7292229980 100644
--- a/test/tools/vendor/github.com/go-openapi/swag/README.md
+++ b/test/tools/vendor/github.com/go-openapi/swag/README.md
@@ -1,7 +1,8 @@
-# Swag [](https://travis-ci.org/go-openapi/swag) [](https://codecov.io/gh/go-openapi/swag) [](https://slackin.goswagger.io)
+# Swag [](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/swag)
+[](https://slackin.goswagger.io)
[](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE)
-[](http://godoc.org/github.com/go-openapi/swag)
+[](https://pkg.go.dev/github.com/go-openapi/swag)
[](https://goreportcard.com/report/github.com/go-openapi/swag)
Contains a bunch of helper functions for go-openapi and go-swagger projects.
@@ -18,4 +19,5 @@ You may also use it standalone for your projects.
This repo has only few dependencies outside of the standard library:
-* YAML utilities depend on gopkg.in/yaml.v2
+* YAML utilities depend on `gopkg.in/yaml.v3`
+* `github.com/mailru/easyjson v0.7.7`
diff --git a/test/tools/vendor/github.com/go-openapi/swag/initialism_index.go b/test/tools/vendor/github.com/go-openapi/swag/initialism_index.go
new file mode 100644
index 00000000000..20a359bb60a
--- /dev/null
+++ b/test/tools/vendor/github.com/go-openapi/swag/initialism_index.go
@@ -0,0 +1,202 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+ "sort"
+ "strings"
+ "sync"
+)
+
+var (
+ // commonInitialisms are common acronyms that are kept as whole uppercased words.
+ commonInitialisms *indexOfInitialisms
+
+ // initialisms is a slice of sorted initialisms
+ initialisms []string
+
+ // a copy of initialisms pre-baked as []rune
+ initialismsRunes [][]rune
+ initialismsUpperCased [][]rune
+
+ isInitialism func(string) bool
+
+ maxAllocMatches int
+)
+
+func init() {
+ // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769
+ configuredInitialisms := map[string]bool{
+ "ACL": true,
+ "API": true,
+ "ASCII": true,
+ "CPU": true,
+ "CSS": true,
+ "DNS": true,
+ "EOF": true,
+ "GUID": true,
+ "HTML": true,
+ "HTTPS": true,
+ "HTTP": true,
+ "ID": true,
+ "IP": true,
+ "IPv4": true,
+ "IPv6": true,
+ "JSON": true,
+ "LHS": true,
+ "OAI": true,
+ "QPS": true,
+ "RAM": true,
+ "RHS": true,
+ "RPC": true,
+ "SLA": true,
+ "SMTP": true,
+ "SQL": true,
+ "SSH": true,
+ "TCP": true,
+ "TLS": true,
+ "TTL": true,
+ "UDP": true,
+ "UI": true,
+ "UID": true,
+ "UUID": true,
+ "URI": true,
+ "URL": true,
+ "UTF8": true,
+ "VM": true,
+ "XML": true,
+ "XMPP": true,
+ "XSRF": true,
+ "XSS": true,
+ }
+
+ // a thread-safe index of initialisms
+ commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms)
+ initialisms = commonInitialisms.sorted()
+ initialismsRunes = asRunes(initialisms)
+ initialismsUpperCased = asUpperCased(initialisms)
+ maxAllocMatches = maxAllocHeuristic(initialismsRunes)
+
+ // a test function
+ isInitialism = commonInitialisms.isInitialism
+}
+
+func asRunes(in []string) [][]rune {
+ out := make([][]rune, len(in))
+ for i, initialism := range in {
+ out[i] = []rune(initialism)
+ }
+
+ return out
+}
+
+func asUpperCased(in []string) [][]rune {
+ out := make([][]rune, len(in))
+
+ for i, initialism := range in {
+ out[i] = []rune(upper(trim(initialism)))
+ }
+
+ return out
+}
+
+func maxAllocHeuristic(in [][]rune) int {
+ heuristic := make(map[rune]int)
+ for _, initialism := range in {
+ heuristic[initialism[0]]++
+ }
+
+ var maxAlloc int
+ for _, val := range heuristic {
+ if val > maxAlloc {
+ maxAlloc = val
+ }
+ }
+
+ return maxAlloc
+}
+
+// AddInitialisms add additional initialisms
+func AddInitialisms(words ...string) {
+ for _, word := range words {
+ // commonInitialisms[upper(word)] = true
+ commonInitialisms.add(upper(word))
+ }
+ // sort again
+ initialisms = commonInitialisms.sorted()
+ initialismsRunes = asRunes(initialisms)
+ initialismsUpperCased = asUpperCased(initialisms)
+}
+
+// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
+// Since go1.9, this may be implemented with sync.Map.
+type indexOfInitialisms struct {
+ sortMutex *sync.Mutex
+ index *sync.Map
+}
+
+func newIndexOfInitialisms() *indexOfInitialisms {
+ return &indexOfInitialisms{
+ sortMutex: new(sync.Mutex),
+ index: new(sync.Map),
+ }
+}
+
+func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
+ m.sortMutex.Lock()
+ defer m.sortMutex.Unlock()
+ for k, v := range initial {
+ m.index.Store(k, v)
+ }
+ return m
+}
+
+func (m *indexOfInitialisms) isInitialism(key string) bool {
+ _, ok := m.index.Load(key)
+ return ok
+}
+
+func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
+ m.index.Store(key, true)
+ return m
+}
+
+func (m *indexOfInitialisms) sorted() (result []string) {
+ m.sortMutex.Lock()
+ defer m.sortMutex.Unlock()
+ m.index.Range(func(key, _ interface{}) bool {
+ k := key.(string)
+ result = append(result, k)
+ return true
+ })
+ sort.Sort(sort.Reverse(byInitialism(result)))
+ return
+}
+
+type byInitialism []string
+
+func (s byInitialism) Len() int {
+ return len(s)
+}
+func (s byInitialism) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+func (s byInitialism) Less(i, j int) bool {
+ if len(s[i]) != len(s[j]) {
+ return len(s[i]) < len(s[j])
+ }
+
+ return strings.Compare(s[i], s[j]) > 0
+}
diff --git a/test/tools/vendor/github.com/go-openapi/swag/loading.go b/test/tools/vendor/github.com/go-openapi/swag/loading.go
index 00038c3773c..783442fddf6 100644
--- a/test/tools/vendor/github.com/go-openapi/swag/loading.go
+++ b/test/tools/vendor/github.com/go-openapi/swag/loading.go
@@ -21,6 +21,7 @@ import (
"net/http"
"net/url"
"os"
+ "path"
"path/filepath"
"runtime"
"strings"
@@ -40,43 +41,97 @@ var LoadHTTPBasicAuthPassword = ""
var LoadHTTPCustomHeaders = map[string]string{}
// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in
-func LoadFromFileOrHTTP(path string) ([]byte, error) {
- return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path)
+func LoadFromFileOrHTTP(pth string) ([]byte, error) {
+ return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(pth)
}
// LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in
// timeout arg allows for per request overriding of the request timeout
-func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) {
- return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path)
+func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration) ([]byte, error) {
+ return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(timeout))(pth)
}
-// LoadStrategy returns a loader function for a given path or uri
-func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) {
- if strings.HasPrefix(path, "http") {
+// LoadStrategy returns a loader function for a given path or URI.
+//
+// The load strategy returns the remote load for any path starting with `http`.
+// So this works for any URI with a scheme `http` or `https`.
+//
+// The fallback strategy is to call the local loader.
+//
+// The local loader takes a local file system path (absolute or relative) as argument,
+// or alternatively a `file://...` URI, **without host** (see also below for windows).
+//
+// There are a few liberalities, initially intended to be tolerant regarding the URI syntax,
+// especially on windows.
+//
+// Before the local loader is called, the given path is transformed:
+// - percent-encoded characters are unescaped
+// - simple paths (e.g. `./folder/file`) are passed as-is
+// - on windows, occurrences of `/` are replaced by `\`, so providing a relative path such as `folder/file` works too.
+//
+// For paths provided as URIs with the "file" scheme, please note that:
+// - `file://` is simply stripped.
+// This means that the host part of the URI is not parsed at all.
+// For example, `file:///folder/file` becomes `/folder/file`,
+// but `file://localhost/folder/file` becomes `localhost/folder/file` on unix systems.
+// Similarly, `file://./folder/file` yields `./folder/file`.
+// - on windows, `file://...` can take a host so as to specify an UNC share location.
+//
+// Reminder about windows-specifics:
+// - `file://host/folder/file` becomes an UNC path like `\\host\folder\file` (no port specification is supported)
+// - `file:///c:/folder/file` becomes `C:\folder\file`
+// - `file://c:/folder/file` is tolerated (without leading `/`) and becomes `c:\folder\file`
+func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) {
+ if strings.HasPrefix(pth, "http") {
return remote
}
- return func(pth string) ([]byte, error) {
- upth, err := pathUnescape(pth)
+
+ return func(p string) ([]byte, error) {
+ upth, err := url.PathUnescape(p)
if err != nil {
return nil, err
}
- if strings.HasPrefix(pth, `file://`) {
- if runtime.GOOS == "windows" {
- // support for canonical file URIs on windows.
- // Zero tolerance here for dodgy URIs.
- u, _ := url.Parse(upth)
- if u.Host != "" {
- // assume UNC name (volume share)
- // file://host/share/folder\... ==> \\host\share\path\folder
- // NOTE: UNC port not yet supported
- upth = strings.Join([]string{`\`, u.Host, u.Path}, `\`)
- } else {
- // file:///c:/folder/... ==> just remove the leading slash
- upth = strings.TrimPrefix(upth, `file:///`)
- }
- } else {
- upth = strings.TrimPrefix(upth, `file://`)
+ if !strings.HasPrefix(p, `file://`) {
+ // regular file path provided: just normalize slashes
+ return local(filepath.FromSlash(upth))
+ }
+
+ if runtime.GOOS != "windows" {
+ // crude processing: this leaves full URIs with a host with a (mostly) unexpected result
+ upth = strings.TrimPrefix(upth, `file://`)
+
+ return local(filepath.FromSlash(upth))
+ }
+
+ // windows-only pre-processing of file://... URIs
+
+ // support for canonical file URIs on windows.
+ u, err := url.Parse(filepath.ToSlash(upth))
+ if err != nil {
+ return nil, err
+ }
+
+ if u.Host != "" {
+ // assume UNC name (volume share)
+ // NOTE: UNC port not yet supported
+
+ // when the "host" segment is a drive letter:
+ // file://C:/folder/... => C:\folder
+ upth = path.Clean(strings.Join([]string{u.Host, u.Path}, `/`))
+ if !strings.HasSuffix(u.Host, ":") && u.Host[0] != '.' {
+ // tolerance: if we have a leading dot, this can't be a host
+ // file://host/share/folder\... ==> \\host\share\path\folder
+ upth = "//" + upth
+ }
+ } else {
+ // no host, let's figure out if this is a drive letter
+ upth = strings.TrimPrefix(upth, `file://`)
+ first, _, _ := strings.Cut(strings.TrimPrefix(u.Path, "/"), "/")
+ if strings.HasSuffix(first, ":") {
+ // drive letter in the first segment:
+ // file:///c:/folder/... ==> strip the leading slash
+ upth = strings.TrimPrefix(upth, `/`)
}
}
diff --git a/test/tools/vendor/github.com/go-openapi/swag/name_lexem.go b/test/tools/vendor/github.com/go-openapi/swag/name_lexem.go
index aa7f6a9bb8e..8bb64ac32f9 100644
--- a/test/tools/vendor/github.com/go-openapi/swag/name_lexem.go
+++ b/test/tools/vendor/github.com/go-openapi/swag/name_lexem.go
@@ -14,74 +14,80 @@
package swag
-import "unicode"
+import (
+ "unicode"
+ "unicode/utf8"
+)
type (
- nameLexem interface {
- GetUnsafeGoName() string
- GetOriginal() string
- IsInitialism() bool
- }
+ lexemKind uint8
- initialismNameLexem struct {
+ nameLexem struct {
original string
matchedInitialism string
+ kind lexemKind
}
+)
- casualNameLexem struct {
- original string
- }
+const (
+ lexemKindCasualName lexemKind = iota
+ lexemKindInitialismName
)
-func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem {
- return &initialismNameLexem{
+func newInitialismNameLexem(original, matchedInitialism string) nameLexem {
+ return nameLexem{
+ kind: lexemKindInitialismName,
original: original,
matchedInitialism: matchedInitialism,
}
}
-func newCasualNameLexem(original string) *casualNameLexem {
- return &casualNameLexem{
+func newCasualNameLexem(original string) nameLexem {
+ return nameLexem{
+ kind: lexemKindCasualName,
original: original,
}
}
-func (l *initialismNameLexem) GetUnsafeGoName() string {
- return l.matchedInitialism
-}
+func (l nameLexem) GetUnsafeGoName() string {
+ if l.kind == lexemKindInitialismName {
+ return l.matchedInitialism
+ }
+
+ var (
+ first rune
+ rest string
+ )
-func (l *casualNameLexem) GetUnsafeGoName() string {
- var first rune
- var rest string
for i, orig := range l.original {
if i == 0 {
first = orig
continue
}
+
if i > 0 {
rest = l.original[i:]
break
}
}
+
if len(l.original) > 1 {
- return string(unicode.ToUpper(first)) + lower(rest)
+ b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest))
+ defer func() {
+ poolOfBuffers.RedeemBuffer(b)
+ }()
+ b.WriteRune(unicode.ToUpper(first))
+ b.WriteString(lower(rest))
+ return b.String()
}
return l.original
}
-func (l *initialismNameLexem) GetOriginal() string {
+func (l nameLexem) GetOriginal() string {
return l.original
}
-func (l *casualNameLexem) GetOriginal() string {
- return l.original
-}
-
-func (l *initialismNameLexem) IsInitialism() bool {
- return true
-}
-
-func (l *casualNameLexem) IsInitialism() bool {
- return false
+func (l nameLexem) IsInitialism() bool {
+ return l.kind == lexemKindInitialismName
}
diff --git a/test/tools/vendor/github.com/go-openapi/swag/post_go18.go b/test/tools/vendor/github.com/go-openapi/swag/post_go18.go
deleted file mode 100644
index f5228b82c0f..00000000000
--- a/test/tools/vendor/github.com/go-openapi/swag/post_go18.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.8
-// +build go1.8
-
-package swag
-
-import "net/url"
-
-func pathUnescape(path string) (string, error) {
- return url.PathUnescape(path)
-}
diff --git a/test/tools/vendor/github.com/go-openapi/swag/post_go19.go b/test/tools/vendor/github.com/go-openapi/swag/post_go19.go
deleted file mode 100644
index 7c7da9c0880..00000000000
--- a/test/tools/vendor/github.com/go-openapi/swag/post_go19.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.9
-// +build go1.9
-
-package swag
-
-import (
- "sort"
- "sync"
-)
-
-// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
-// Since go1.9, this may be implemented with sync.Map.
-type indexOfInitialisms struct {
- sortMutex *sync.Mutex
- index *sync.Map
-}
-
-func newIndexOfInitialisms() *indexOfInitialisms {
- return &indexOfInitialisms{
- sortMutex: new(sync.Mutex),
- index: new(sync.Map),
- }
-}
-
-func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
- m.sortMutex.Lock()
- defer m.sortMutex.Unlock()
- for k, v := range initial {
- m.index.Store(k, v)
- }
- return m
-}
-
-func (m *indexOfInitialisms) isInitialism(key string) bool {
- _, ok := m.index.Load(key)
- return ok
-}
-
-func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
- m.index.Store(key, true)
- return m
-}
-
-func (m *indexOfInitialisms) sorted() (result []string) {
- m.sortMutex.Lock()
- defer m.sortMutex.Unlock()
- m.index.Range(func(key, value interface{}) bool {
- k := key.(string)
- result = append(result, k)
- return true
- })
- sort.Sort(sort.Reverse(byInitialism(result)))
- return
-}
diff --git a/test/tools/vendor/github.com/go-openapi/swag/pre_go18.go b/test/tools/vendor/github.com/go-openapi/swag/pre_go18.go
deleted file mode 100644
index 2757d9b95f8..00000000000
--- a/test/tools/vendor/github.com/go-openapi/swag/pre_go18.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !go1.8
-// +build !go1.8
-
-package swag
-
-import "net/url"
-
-func pathUnescape(path string) (string, error) {
- return url.QueryUnescape(path)
-}
diff --git a/test/tools/vendor/github.com/go-openapi/swag/pre_go19.go b/test/tools/vendor/github.com/go-openapi/swag/pre_go19.go
deleted file mode 100644
index 0565db377be..00000000000
--- a/test/tools/vendor/github.com/go-openapi/swag/pre_go19.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !go1.9
-// +build !go1.9
-
-package swag
-
-import (
- "sort"
- "sync"
-)
-
-// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
-// Before go1.9, this may be implemented with a mutex on the map.
-type indexOfInitialisms struct {
- getMutex *sync.Mutex
- index map[string]bool
-}
-
-func newIndexOfInitialisms() *indexOfInitialisms {
- return &indexOfInitialisms{
- getMutex: new(sync.Mutex),
- index: make(map[string]bool, 50),
- }
-}
-
-func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- for k, v := range initial {
- m.index[k] = v
- }
- return m
-}
-
-func (m *indexOfInitialisms) isInitialism(key string) bool {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- _, ok := m.index[key]
- return ok
-}
-
-func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- m.index[key] = true
- return m
-}
-
-func (m *indexOfInitialisms) sorted() (result []string) {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- for k := range m.index {
- result = append(result, k)
- }
- sort.Sort(sort.Reverse(byInitialism(result)))
- return
-}
diff --git a/test/tools/vendor/github.com/go-openapi/swag/split.go b/test/tools/vendor/github.com/go-openapi/swag/split.go
index a1825fb7dc9..274727a866c 100644
--- a/test/tools/vendor/github.com/go-openapi/swag/split.go
+++ b/test/tools/vendor/github.com/go-openapi/swag/split.go
@@ -15,124 +15,269 @@
package swag
import (
+ "bytes"
+ "sync"
"unicode"
+ "unicode/utf8"
)
-var nameReplaceTable = map[rune]string{
- '@': "At ",
- '&': "And ",
- '|': "Pipe ",
- '$': "Dollar ",
- '!': "Bang ",
- '-': "",
- '_': "",
-}
-
type (
splitter struct {
- postSplitInitialismCheck bool
initialisms []string
+ initialismsRunes [][]rune
+ initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version
+ postSplitInitialismCheck bool
+ }
+
+ splitterOption func(*splitter)
+
+ initialismMatch struct {
+ body []rune
+ start, end int
+ complete bool
+ }
+ initialismMatches []initialismMatch
+)
+
+type (
+ // memory pools of temporary objects.
+ //
+ // These are used to recycle temporarily allocated objects
+ // and relieve the GC from undue pressure.
+
+ matchesPool struct {
+ *sync.Pool
}
- splitterOption func(*splitter) *splitter
+ buffersPool struct {
+ *sync.Pool
+ }
+
+ lexemsPool struct {
+ *sync.Pool
+ }
+
+ splittersPool struct {
+ *sync.Pool
+ }
)
-// split calls the splitter; splitter provides more control and post options
+var (
+ // poolOfMatches holds temporary slices for recycling during the initialism match process
+ poolOfMatches = matchesPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := make(initialismMatches, 0, maxAllocMatches)
+
+ return &s
+ },
+ },
+ }
+
+ poolOfBuffers = buffersPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ return new(bytes.Buffer)
+ },
+ },
+ }
+
+ poolOfLexems = lexemsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := make([]nameLexem, 0, maxAllocMatches)
+
+ return &s
+ },
+ },
+ }
+
+ poolOfSplitters = splittersPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := newSplitter()
+
+ return &s
+ },
+ },
+ }
+)
+
+// nameReplaceTable finds a word representation for special characters.
+func nameReplaceTable(r rune) (string, bool) {
+ switch r {
+ case '@':
+ return "At ", true
+ case '&':
+ return "And ", true
+ case '|':
+ return "Pipe ", true
+ case '$':
+ return "Dollar ", true
+ case '!':
+ return "Bang ", true
+ case '-':
+ return "", true
+ case '_':
+ return "", true
+ default:
+ return "", false
+ }
+}
+
+// split calls the splitter.
+//
+// Use newSplitter for more control and options
func split(str string) []string {
- lexems := newSplitter().split(str)
- result := make([]string, 0, len(lexems))
+ s := poolOfSplitters.BorrowSplitter()
+ lexems := s.split(str)
+ result := make([]string, 0, len(*lexems))
- for _, lexem := range lexems {
+ for _, lexem := range *lexems {
result = append(result, lexem.GetOriginal())
}
+ poolOfLexems.RedeemLexems(lexems)
+ poolOfSplitters.RedeemSplitter(s)
return result
}
-func (s *splitter) split(str string) []nameLexem {
- return s.toNameLexems(str)
-}
-
-func newSplitter(options ...splitterOption) *splitter {
- splitter := &splitter{
+func newSplitter(options ...splitterOption) splitter {
+ s := splitter{
postSplitInitialismCheck: false,
initialisms: initialisms,
+ initialismsRunes: initialismsRunes,
+ initialismsUpperCased: initialismsUpperCased,
}
for _, option := range options {
- splitter = option(splitter)
+ option(&s)
}
- return splitter
+ return s
}
// withPostSplitInitialismCheck allows to catch initialisms after main split process
-func withPostSplitInitialismCheck(s *splitter) *splitter {
+func withPostSplitInitialismCheck(s *splitter) {
s.postSplitInitialismCheck = true
+}
+
+func (p matchesPool) BorrowMatches() *initialismMatches {
+ s := p.Get().(*initialismMatches)
+ *s = (*s)[:0] // reset slice, keep allocated capacity
+
return s
}
-type (
- initialismMatch struct {
- start, end int
- body []rune
- complete bool
+func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer {
+ s := p.Get().(*bytes.Buffer)
+ s.Reset()
+
+ if s.Cap() < size {
+ s.Grow(size)
}
- initialismMatches []*initialismMatch
-)
-func (s *splitter) toNameLexems(name string) []nameLexem {
+ return s
+}
+
+func (p lexemsPool) BorrowLexems() *[]nameLexem {
+ s := p.Get().(*[]nameLexem)
+ *s = (*s)[:0] // reset slice, keep allocated capacity
+
+ return s
+}
+
+func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter {
+ s := p.Get().(*splitter)
+ s.postSplitInitialismCheck = false // reset options
+ for _, apply := range options {
+ apply(s)
+ }
+
+ return s
+}
+
+func (p matchesPool) RedeemMatches(s *initialismMatches) {
+ p.Put(s)
+}
+
+func (p buffersPool) RedeemBuffer(s *bytes.Buffer) {
+ p.Put(s)
+}
+
+func (p lexemsPool) RedeemLexems(s *[]nameLexem) {
+ p.Put(s)
+}
+
+func (p splittersPool) RedeemSplitter(s *splitter) {
+ p.Put(s)
+}
+
+func (m initialismMatch) isZero() bool {
+ return m.start == 0 && m.end == 0
+}
+
+func (s splitter) split(name string) *[]nameLexem {
nameRunes := []rune(name)
matches := s.gatherInitialismMatches(nameRunes)
+ if matches == nil {
+ return poolOfLexems.BorrowLexems()
+ }
+
return s.mapMatchesToNameLexems(nameRunes, matches)
}
-func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches {
- matches := make(initialismMatches, 0)
+func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches {
+ var matches *initialismMatches
for currentRunePosition, currentRune := range nameRunes {
- newMatches := make(initialismMatches, 0, len(matches))
+ // recycle these allocations as we loop over runes
+ // with such recycling, only 2 slices should be allocated per call
+ // instead of O(n).
+ newMatches := poolOfMatches.BorrowMatches()
// check current initialism matches
- for _, match := range matches {
- if keepCompleteMatch := match.complete; keepCompleteMatch {
- newMatches = append(newMatches, match)
- continue
- }
+ if matches != nil { // skip first iteration
+ for _, match := range *matches {
+ if keepCompleteMatch := match.complete; keepCompleteMatch {
+ *newMatches = append(*newMatches, match)
+ continue
+ }
- // drop failed match
- currentMatchRune := match.body[currentRunePosition-match.start]
- if !s.initialismRuneEqual(currentMatchRune, currentRune) {
- continue
- }
+ // drop failed match
+ currentMatchRune := match.body[currentRunePosition-match.start]
+ if currentMatchRune != currentRune {
+ continue
+ }
- // try to complete ongoing match
- if currentRunePosition-match.start == len(match.body)-1 {
- // we are close; the next step is to check the symbol ahead
- // if it is a small letter, then it is not the end of match
- // but beginning of the next word
-
- if currentRunePosition < len(nameRunes)-1 {
- nextRune := nameRunes[currentRunePosition+1]
- if newWord := unicode.IsLower(nextRune); newWord {
- // oh ok, it was the start of a new word
- continue
+ // try to complete ongoing match
+ if currentRunePosition-match.start == len(match.body)-1 {
+ // we are close; the next step is to check the symbol ahead
+ // if it is a small letter, then it is not the end of match
+ // but beginning of the next word
+
+ if currentRunePosition < len(nameRunes)-1 {
+ nextRune := nameRunes[currentRunePosition+1]
+ if newWord := unicode.IsLower(nextRune); newWord {
+ // oh ok, it was the start of a new word
+ continue
+ }
}
+
+ match.complete = true
+ match.end = currentRunePosition
}
- match.complete = true
- match.end = currentRunePosition
+ *newMatches = append(*newMatches, match)
}
-
- newMatches = append(newMatches, match)
}
// check for new initialism matches
- for _, initialism := range s.initialisms {
- initialismRunes := []rune(initialism)
- if s.initialismRuneEqual(initialismRunes[0], currentRune) {
- newMatches = append(newMatches, &initialismMatch{
+ for i := range s.initialisms {
+ initialismRunes := s.initialismsRunes[i]
+ if initialismRunes[0] == currentRune {
+ *newMatches = append(*newMatches, initialismMatch{
start: currentRunePosition,
body: initialismRunes,
complete: false,
@@ -140,24 +285,28 @@ func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches {
}
}
+ if matches != nil {
+ poolOfMatches.RedeemMatches(matches)
+ }
matches = newMatches
}
+ // up to the caller to redeem this last slice
return matches
}
-func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem {
- nameLexems := make([]nameLexem, 0)
+func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem {
+ nameLexems := poolOfLexems.BorrowLexems()
- var lastAcceptedMatch *initialismMatch
- for _, match := range matches {
+ var lastAcceptedMatch initialismMatch
+ for _, match := range *matches {
if !match.complete {
continue
}
- if firstMatch := lastAcceptedMatch == nil; firstMatch {
- nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...)
- nameLexems = append(nameLexems, s.breakInitialism(string(match.body)))
+ if firstMatch := lastAcceptedMatch.isZero(); firstMatch {
+ s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start])
+ *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
lastAcceptedMatch = match
@@ -169,63 +318,66 @@ func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMa
}
middle := nameRunes[lastAcceptedMatch.end+1 : match.start]
- nameLexems = append(nameLexems, s.breakCasualString(middle)...)
- nameLexems = append(nameLexems, s.breakInitialism(string(match.body)))
+ s.appendBrokenDownCasualString(nameLexems, middle)
+ *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
lastAcceptedMatch = match
}
// we have not found any accepted matches
- if lastAcceptedMatch == nil {
- return s.breakCasualString(nameRunes)
- }
-
- if lastAcceptedMatch.end+1 != len(nameRunes) {
+ if lastAcceptedMatch.isZero() {
+ *nameLexems = (*nameLexems)[:0]
+ s.appendBrokenDownCasualString(nameLexems, nameRunes)
+ } else if lastAcceptedMatch.end+1 != len(nameRunes) {
rest := nameRunes[lastAcceptedMatch.end+1:]
- nameLexems = append(nameLexems, s.breakCasualString(rest)...)
+ s.appendBrokenDownCasualString(nameLexems, rest)
}
- return nameLexems
-}
+ poolOfMatches.RedeemMatches(matches)
-func (s *splitter) initialismRuneEqual(a, b rune) bool {
- return a == b
+ return nameLexems
}
-func (s *splitter) breakInitialism(original string) nameLexem {
+func (s splitter) breakInitialism(original string) nameLexem {
return newInitialismNameLexem(original, original)
}
-func (s *splitter) breakCasualString(str []rune) []nameLexem {
- segments := make([]nameLexem, 0)
- currentSegment := ""
+func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) {
+ currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can be reused
+ defer func() {
+ poolOfBuffers.RedeemBuffer(currentSegment)
+ }()
addCasualNameLexem := func(original string) {
- segments = append(segments, newCasualNameLexem(original))
+ *segments = append(*segments, newCasualNameLexem(original))
}
addInitialismNameLexem := func(original, match string) {
- segments = append(segments, newInitialismNameLexem(original, match))
+ *segments = append(*segments, newInitialismNameLexem(original, match))
}
- addNameLexem := func(original string) {
- if s.postSplitInitialismCheck {
- for _, initialism := range s.initialisms {
- if upper(initialism) == upper(original) {
- addInitialismNameLexem(original, initialism)
+ var addNameLexem func(string)
+ if s.postSplitInitialismCheck {
+ addNameLexem = func(original string) {
+ for i := range s.initialisms {
+ if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) {
+ addInitialismNameLexem(original, s.initialisms[i])
+
return
}
}
- }
- addCasualNameLexem(original)
+ addCasualNameLexem(original)
+ }
+ } else {
+ addNameLexem = addCasualNameLexem
}
- for _, rn := range string(str) {
- if replace, found := nameReplaceTable[rn]; found {
- if currentSegment != "" {
- addNameLexem(currentSegment)
- currentSegment = ""
+ for _, rn := range str {
+ if replace, found := nameReplaceTable(rn); found {
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
+ currentSegment.Reset()
}
if replace != "" {
@@ -236,27 +388,121 @@ func (s *splitter) breakCasualString(str []rune) []nameLexem {
}
if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) {
- if currentSegment != "" {
- addNameLexem(currentSegment)
- currentSegment = ""
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
+ currentSegment.Reset()
}
continue
}
if unicode.IsUpper(rn) {
- if currentSegment != "" {
- addNameLexem(currentSegment)
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
}
- currentSegment = ""
+ currentSegment.Reset()
}
- currentSegment += string(rn)
+ currentSegment.WriteRune(rn)
+ }
+
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
}
+}
+
+// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but
+// it ignores leading and trailing blank spaces in the compared
+// string.
+//
+// base is assumed to be composed of upper-cased runes, and be already
+// trimmed.
+//
+// This code is heavily inspired from strings.EqualFold.
+func isEqualFoldIgnoreSpace(base []rune, str string) bool {
+ var i, baseIndex int
+ // equivalent to b := []byte(str), but without data copy
+ b := hackStringBytes(str)
+
+ for i < len(b) {
+ if c := b[i]; c < utf8.RuneSelf {
+ // fast path for ASCII
+ if c != ' ' && c != '\t' {
+ break
+ }
+ i++
+
+ continue
+ }
+
+ // unicode case
+ r, size := utf8.DecodeRune(b[i:])
+ if !unicode.IsSpace(r) {
+ break
+ }
+ i += size
+ }
+
+ if i >= len(b) {
+ return len(base) == 0
+ }
+
+ for _, baseRune := range base {
+ if i >= len(b) {
+ break
+ }
+
+ if c := b[i]; c < utf8.RuneSelf {
+ // single byte rune case (ASCII)
+ if baseRune >= utf8.RuneSelf {
+ return false
+ }
+
+ baseChar := byte(baseRune)
+ if c != baseChar &&
+ !('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) {
+ return false
+ }
+
+ baseIndex++
+ i++
+
+ continue
+ }
+
+ // unicode case
+ r, size := utf8.DecodeRune(b[i:])
+ if unicode.ToUpper(r) != baseRune {
+ return false
+ }
+ baseIndex++
+ i += size
+ }
+
+ if baseIndex != len(base) {
+ return false
+ }
+
+ // all passed: now we should only have blanks
+ for i < len(b) {
+ if c := b[i]; c < utf8.RuneSelf {
+ // fast path for ASCII
+ if c != ' ' && c != '\t' {
+ return false
+ }
+ i++
+
+ continue
+ }
+
+ // unicode case
+ r, size := utf8.DecodeRune(b[i:])
+ if !unicode.IsSpace(r) {
+ return false
+ }
- if currentSegment != "" {
- addNameLexem(currentSegment)
+ i += size
}
- return segments
+ return true
}
diff --git a/test/tools/vendor/github.com/go-openapi/swag/string_bytes.go b/test/tools/vendor/github.com/go-openapi/swag/string_bytes.go
new file mode 100644
index 00000000000..90745d5ca9f
--- /dev/null
+++ b/test/tools/vendor/github.com/go-openapi/swag/string_bytes.go
@@ -0,0 +1,8 @@
+package swag
+
+import "unsafe"
+
+// hackStringBytes returns the (unsafe) underlying bytes slice of a string.
+func hackStringBytes(str string) []byte {
+ return unsafe.Slice(unsafe.StringData(str), len(str))
+}
diff --git a/test/tools/vendor/github.com/go-openapi/swag/util.go b/test/tools/vendor/github.com/go-openapi/swag/util.go
index d971fbe34b4..5051401c49f 100644
--- a/test/tools/vendor/github.com/go-openapi/swag/util.go
+++ b/test/tools/vendor/github.com/go-openapi/swag/util.go
@@ -18,76 +18,25 @@ import (
"reflect"
"strings"
"unicode"
+ "unicode/utf8"
)
-// commonInitialisms are common acronyms that are kept as whole uppercased words.
-var commonInitialisms *indexOfInitialisms
-
-// initialisms is a slice of sorted initialisms
-var initialisms []string
-
-var isInitialism func(string) bool
-
// GoNamePrefixFunc sets an optional rule to prefix go names
// which do not start with a letter.
//
+// The prefix function is assumed to return a string that starts with an upper case letter.
+//
// e.g. to help convert "123" into "{prefix}123"
//
// The default is to prefix with "X"
var GoNamePrefixFunc func(string) string
-func init() {
- // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769
- var configuredInitialisms = map[string]bool{
- "ACL": true,
- "API": true,
- "ASCII": true,
- "CPU": true,
- "CSS": true,
- "DNS": true,
- "EOF": true,
- "GUID": true,
- "HTML": true,
- "HTTPS": true,
- "HTTP": true,
- "ID": true,
- "IP": true,
- "IPv4": true,
- "IPv6": true,
- "JSON": true,
- "LHS": true,
- "OAI": true,
- "QPS": true,
- "RAM": true,
- "RHS": true,
- "RPC": true,
- "SLA": true,
- "SMTP": true,
- "SQL": true,
- "SSH": true,
- "TCP": true,
- "TLS": true,
- "TTL": true,
- "UDP": true,
- "UI": true,
- "UID": true,
- "UUID": true,
- "URI": true,
- "URL": true,
- "UTF8": true,
- "VM": true,
- "XML": true,
- "XMPP": true,
- "XSRF": true,
- "XSS": true,
+func prefixFunc(name, in string) string {
+ if GoNamePrefixFunc == nil {
+ return "X" + in
}
- // a thread-safe index of initialisms
- commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms)
- initialisms = commonInitialisms.sorted()
-
- // a test function
- isInitialism = commonInitialisms.isInitialism
+ return GoNamePrefixFunc(name) + in
}
const (
@@ -156,25 +105,9 @@ func SplitByFormat(data, format string) []string {
return result
}
-type byInitialism []string
-
-func (s byInitialism) Len() int {
- return len(s)
-}
-func (s byInitialism) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-func (s byInitialism) Less(i, j int) bool {
- if len(s[i]) != len(s[j]) {
- return len(s[i]) < len(s[j])
- }
-
- return strings.Compare(s[i], s[j]) > 0
-}
-
// Removes leading whitespaces
func trim(str string) string {
- return strings.Trim(str, " ")
+ return strings.TrimSpace(str)
}
// Shortcut to strings.ToUpper()
@@ -188,15 +121,20 @@ func lower(str string) string {
}
// Camelize an uppercased word
-func Camelize(word string) (camelized string) {
+func Camelize(word string) string {
+ camelized := poolOfBuffers.BorrowBuffer(len(word))
+ defer func() {
+ poolOfBuffers.RedeemBuffer(camelized)
+ }()
+
for pos, ru := range []rune(word) {
if pos > 0 {
- camelized += string(unicode.ToLower(ru))
+ camelized.WriteRune(unicode.ToLower(ru))
} else {
- camelized += string(unicode.ToUpper(ru))
+ camelized.WriteRune(unicode.ToUpper(ru))
}
}
- return
+ return camelized.String()
}
// ToFileName lowercases and underscores a go type name
@@ -224,33 +162,40 @@ func ToCommandName(name string) string {
// ToHumanNameLower represents a code name as a human series of words
func ToHumanNameLower(name string) string {
- in := newSplitter(withPostSplitInitialismCheck).split(name)
- out := make([]string, 0, len(in))
+ s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+ in := s.split(name)
+ poolOfSplitters.RedeemSplitter(s)
+ out := make([]string, 0, len(*in))
- for _, w := range in {
+ for _, w := range *in {
if !w.IsInitialism() {
out = append(out, lower(w.GetOriginal()))
} else {
- out = append(out, w.GetOriginal())
+ out = append(out, trim(w.GetOriginal()))
}
}
+ poolOfLexems.RedeemLexems(in)
return strings.Join(out, " ")
}
// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized
func ToHumanNameTitle(name string) string {
- in := newSplitter(withPostSplitInitialismCheck).split(name)
+ s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+ in := s.split(name)
+ poolOfSplitters.RedeemSplitter(s)
- out := make([]string, 0, len(in))
- for _, w := range in {
- original := w.GetOriginal()
+ out := make([]string, 0, len(*in))
+ for _, w := range *in {
+ original := trim(w.GetOriginal())
if !w.IsInitialism() {
out = append(out, Camelize(original))
} else {
out = append(out, original)
}
}
+ poolOfLexems.RedeemLexems(in)
+
return strings.Join(out, " ")
}
@@ -264,7 +209,7 @@ func ToJSONName(name string) string {
out = append(out, lower(w))
continue
}
- out = append(out, Camelize(w))
+ out = append(out, Camelize(trim(w)))
}
return strings.Join(out, "")
}
@@ -283,35 +228,70 @@ func ToVarName(name string) string {
// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes
func ToGoName(name string) string {
- lexems := newSplitter(withPostSplitInitialismCheck).split(name)
+ s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+ lexems := s.split(name)
+ poolOfSplitters.RedeemSplitter(s)
+ defer func() {
+ poolOfLexems.RedeemLexems(lexems)
+ }()
+ lexemes := *lexems
+
+ if len(lexemes) == 0 {
+ return ""
+ }
+
+ result := poolOfBuffers.BorrowBuffer(len(name))
+ defer func() {
+ poolOfBuffers.RedeemBuffer(result)
+ }()
+
+ // check if not starting with a letter, upper case
+ firstPart := lexemes[0].GetUnsafeGoName()
+ if lexemes[0].IsInitialism() {
+ firstPart = upper(firstPart)
+ }
+
+ if c := firstPart[0]; c < utf8.RuneSelf {
+ // ASCII
+ switch {
+ case 'A' <= c && c <= 'Z':
+ result.WriteString(firstPart)
+ case 'a' <= c && c <= 'z':
+ result.WriteByte(c - 'a' + 'A')
+ result.WriteString(firstPart[1:])
+ default:
+ result.WriteString(prefixFunc(name, firstPart))
+ // NOTE: no longer check if prefixFunc returns a string that starts with uppercase:
+ // assume this is always the case
+ }
+ } else {
+ // unicode
+ firstRune, _ := utf8.DecodeRuneInString(firstPart)
+ switch {
+ case !unicode.IsLetter(firstRune):
+ result.WriteString(prefixFunc(name, firstPart))
+ case !unicode.IsUpper(firstRune):
+ result.WriteString(prefixFunc(name, firstPart))
+ /*
+ result.WriteRune(unicode.ToUpper(firstRune))
+ result.WriteString(firstPart[offset:])
+ */
+ default:
+ result.WriteString(firstPart)
+ }
+ }
- result := ""
- for _, lexem := range lexems {
+ for _, lexem := range lexemes[1:] {
goName := lexem.GetUnsafeGoName()
// to support old behavior
if lexem.IsInitialism() {
goName = upper(goName)
}
- result += goName
+ result.WriteString(goName)
}
- if len(result) > 0 {
- // Only prefix with X when the first character isn't an ascii letter
- first := []rune(result)[0]
- if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) {
- if GoNamePrefixFunc == nil {
- return "X" + result
- }
- result = GoNamePrefixFunc(name) + result
- }
- first = []rune(result)[0]
- if unicode.IsLetter(first) && !unicode.IsUpper(first) {
- result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...))
- }
- }
-
- return result
+ return result.String()
}
// ContainsStrings searches a slice of strings for a case-sensitive match
@@ -343,7 +323,7 @@ type zeroable interface {
func IsZero(data interface{}) bool {
v := reflect.ValueOf(data)
// check for nil data
- switch v.Kind() {
+ switch v.Kind() { //nolint:exhaustive
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
if v.IsNil() {
return true
@@ -356,7 +336,7 @@ func IsZero(data interface{}) bool {
}
// continue with slightly more complex reflection
- switch v.Kind() {
+ switch v.Kind() { //nolint:exhaustive
case reflect.String:
return v.Len() == 0
case reflect.Bool:
@@ -376,16 +356,6 @@ func IsZero(data interface{}) bool {
}
}
-// AddInitialisms add additional initialisms
-func AddInitialisms(words ...string) {
- for _, word := range words {
- // commonInitialisms[upper(word)] = true
- commonInitialisms.add(upper(word))
- }
- // sort again
- initialisms = commonInitialisms.sorted()
-}
-
// CommandLineOptionsGroup represents a group of user-defined command line options
type CommandLineOptionsGroup struct {
ShortDescription string
diff --git a/test/tools/vendor/github.com/go-openapi/swag/yaml.go b/test/tools/vendor/github.com/go-openapi/swag/yaml.go
index f09ee609f3b..f59e0259320 100644
--- a/test/tools/vendor/github.com/go-openapi/swag/yaml.go
+++ b/test/tools/vendor/github.com/go-openapi/swag/yaml.go
@@ -16,8 +16,11 @@ package swag
import (
"encoding/json"
+ "errors"
"fmt"
"path/filepath"
+ "reflect"
+ "sort"
"strconv"
"github.com/mailru/easyjson/jlexer"
@@ -48,7 +51,7 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) {
return nil, err
}
if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode {
- return nil, fmt.Errorf("only YAML documents that are objects are supported")
+ return nil, errors.New("only YAML documents that are objects are supported")
}
return &document, nil
}
@@ -147,7 +150,7 @@ func yamlScalar(node *yaml.Node) (interface{}, error) {
case yamlTimestamp:
return node.Value, nil
case yamlNull:
- return nil, nil
+ return nil, nil //nolint:nilnil
default:
return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag())
}
@@ -245,7 +248,27 @@ func (s JSONMapSlice) MarshalYAML() (interface{}, error) {
return yaml.Marshal(&n)
}
+func isNil(input interface{}) bool {
+ if input == nil {
+ return true
+ }
+ kind := reflect.TypeOf(input).Kind()
+ switch kind { //nolint:exhaustive
+ case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan:
+ return reflect.ValueOf(input).IsNil()
+ default:
+ return false
+ }
+}
+
func json2yaml(item interface{}) (*yaml.Node, error) {
+ if isNil(item) {
+ return &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Value: "null",
+ }, nil
+ }
+
switch val := item.(type) {
case JSONMapSlice:
var n yaml.Node
@@ -265,7 +288,14 @@ func json2yaml(item interface{}) (*yaml.Node, error) {
case map[string]interface{}:
var n yaml.Node
n.Kind = yaml.MappingNode
- for k, v := range val {
+ keys := make([]string, 0, len(val))
+ for k := range val {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ v := val[k]
childNode, err := json2yaml(v)
if err != nil {
return nil, err
@@ -318,8 +348,9 @@ func json2yaml(item interface{}) (*yaml.Node, error) {
Tag: yamlBoolScalar,
Value: strconv.FormatBool(val),
}, nil
+ default:
+ return nil, fmt.Errorf("unhandled type: %T", val)
}
- return nil, nil
}
// JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice
diff --git a/test/tools/vendor/github.com/go-openapi/validate/.golangci.yml b/test/tools/vendor/github.com/go-openapi/validate/.golangci.yml
index 81818ca6788..22f8d21cca1 100644
--- a/test/tools/vendor/github.com/go-openapi/validate/.golangci.yml
+++ b/test/tools/vendor/github.com/go-openapi/validate/.golangci.yml
@@ -1,12 +1,14 @@
linters-settings:
govet:
check-shadowing: true
+ golint:
+ min-confidence: 0
gocyclo:
- min-complexity: 50
+ min-complexity: 45
maligned:
suggest-new: true
dupl:
- threshold: 100
+ threshold: 200
goconst:
min-len: 2
min-occurrences: 3
@@ -15,36 +17,45 @@ linters:
enable-all: true
disable:
- maligned
+ - unparam
- lll
+ - gochecknoinits
+ - gochecknoglobals
+ - funlen
- godox
- gocognit
- whitespace
- wsl
- - funlen
- - gochecknoglobals
- - gochecknoinits
- - scopelint
- wrapcheck
- - exhaustivestruct
- - exhaustive
- - nlreturn
- testpackage
- - gci
- - gofumpt
- - goerr113
+ - nlreturn
- gomnd
- - tparallel
+ - exhaustivestruct
+ - goerr113
+ - errorlint
- nestif
- godot
- - tparallel
+ - gofumpt
- paralleltest
- - cyclop # because we have gocyclo already
- # TODO: review the linters below. We disabled them to make the CI pass first.
- - ireturn
+ - tparallel
+ - thelper
+ - ifshort
+ - exhaustruct
- varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
- forcetypeassert
- - thelper
- # Disable deprecated linters.
- # They will be removed from golangci-lint in future.
+ - cyclop
+ # deprecated linters
+ - deadcode
- interfacer
- - golint
\ No newline at end of file
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/test/tools/vendor/github.com/go-openapi/validate/BENCHMARK.md b/test/tools/vendor/github.com/go-openapi/validate/BENCHMARK.md
new file mode 100644
index 00000000000..79cf6a077ba
--- /dev/null
+++ b/test/tools/vendor/github.com/go-openapi/validate/BENCHMARK.md
@@ -0,0 +1,31 @@
+# Benchmark
+
+Validating the Kubernetes Swagger API
+
+## v0.22.6: 60,000,000 allocs
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/validate
+cpu: AMD Ryzen 7 5800X 8-Core Processor
+Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 8549863982 ns/op 7067424936 B/op 59583275 allocs/op
+```
+
+## After refactor PR: minor but noticeable improvements: 25,000,000 allocs
+```
+go test -bench Spec
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/validate
+cpu: AMD Ryzen 7 5800X 8-Core Processor
+Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 4064535557 ns/op 3379715592 B/op 25320330 allocs/op
+```
+
+## After reduce GC pressure PR: 17,000,000 allocs
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/validate
+cpu: AMD Ryzen 7 5800X 8-Core Processor
+Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 3758414145 ns/op 2593881496 B/op 17111373 allocs/op
+```
diff --git a/test/tools/vendor/github.com/go-openapi/validate/README.md b/test/tools/vendor/github.com/go-openapi/validate/README.md
index ea2d68cb683..e8e1bb218d9 100644
--- a/test/tools/vendor/github.com/go-openapi/validate/README.md
+++ b/test/tools/vendor/github.com/go-openapi/validate/README.md
@@ -1,7 +1,5 @@
-# Validation helpers
-[](https://travis-ci.org/go-openapi/validate)
-[](https://ci.appveyor.com/project/fredbi/validate/branch/master)
-[](https://codecov.io/gh/go-openapi/validate)
+# Validation helpers [](https://github.com/go-openapi/validate/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/validate)
+
[](https://slackin.goswagger.io)
[](https://raw.githubusercontent.com/go-openapi/validate/master/LICENSE)
[](https://pkg.go.dev/github.com/go-openapi/validate)
@@ -24,7 +22,7 @@ Reference can be found here: https://github.com/OAI/OpenAPI-Specification/blob/m
* Minimum, Maximum, MultipleOf
* FormatOf
-[Documentation](https://godoc.org/github.com/go-openapi/validate)
+[Documentation](https://pkg.go.dev/github.com/go-openapi/validate)
## FAQ
diff --git a/test/tools/vendor/github.com/go-openapi/validate/appveyor.yml b/test/tools/vendor/github.com/go-openapi/validate/appveyor.yml
deleted file mode 100644
index 89e5bccb3a5..00000000000
--- a/test/tools/vendor/github.com/go-openapi/validate/appveyor.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-version: "0.1.{build}"
-
-clone_folder: C:\go-openapi\validate
-shallow_clone: true # for startup speed
-pull_requests:
- do_not_increment_build_number: true
-
-#skip_tags: true
-#skip_branch_with_pr: true
-
-# appveyor.yml
-build: off
-
-environment:
- GOPATH: c:\gopath
-
-stack: go 1.15
-
-test_script:
- - go test -v -timeout 20m -args -enable-long ./...
-
-deploy: off
-
-notifications:
- - provider: Slack
- incoming_webhook: https://hooks.slack.com/services/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
- auth_token:
- secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4=
- channel: bots
- on_build_success: false
- on_build_failure: true
- on_build_status_changed: true
diff --git a/test/tools/vendor/github.com/go-openapi/validate/default_validator.go b/test/tools/vendor/github.com/go-openapi/validate/default_validator.go
index bd14c2a269f..e0dd93839ec 100644
--- a/test/tools/vendor/github.com/go-openapi/validate/default_validator.go
+++ b/test/tools/vendor/github.com/go-openapi/validate/default_validator.go
@@ -25,48 +25,55 @@ import (
// According to Swagger spec, default values MUST validate their schema.
type defaultValidator struct {
SpecValidator *SpecValidator
- visitedSchemas map[string]bool
+ visitedSchemas map[string]struct{}
+ schemaOptions *SchemaValidatorOptions
}
// resetVisited resets the internal state of visited schemas
func (d *defaultValidator) resetVisited() {
- d.visitedSchemas = map[string]bool{}
+ if d.visitedSchemas == nil {
+ d.visitedSchemas = make(map[string]struct{})
+
+ return
+ }
+
+ // TODO(go1.21): clear(d.visitedSchemas)
+ for k := range d.visitedSchemas {
+ delete(d.visitedSchemas, k)
+ }
}
-func isVisited(path string, visitedSchemas map[string]bool) bool {
- found := visitedSchemas[path]
- if !found {
- // search for overlapping paths
- frags := strings.Split(path, ".")
- if len(frags) < 2 {
- // shortcut exit on smaller paths
- return found
+func isVisited(path string, visitedSchemas map[string]struct{}) bool {
+ _, found := visitedSchemas[path]
+ if found {
+ return true
+ }
+
+ // search for overlapping paths
+ var (
+ parent string
+ suffix string
+ )
+ for i := len(path) - 2; i >= 0; i-- {
+ r := path[i]
+ if r != '.' {
+ continue
}
- last := len(frags) - 1
- var currentFragStr, parent string
- for i := range frags {
- if i == 0 {
- currentFragStr = frags[last]
- } else {
- currentFragStr = strings.Join([]string{frags[last-i], currentFragStr}, ".")
- }
- if i < last {
- parent = strings.Join(frags[0:last-i], ".")
- } else {
- parent = ""
- }
- if strings.HasSuffix(parent, currentFragStr) {
- found = true
- break
- }
+
+ parent = path[0:i]
+ suffix = path[i+1:]
+
+ if strings.HasSuffix(parent, suffix) {
+ return true
}
}
- return found
+
+ return false
}
// beingVisited asserts a schema is being visited
func (d *defaultValidator) beingVisited(path string) {
- d.visitedSchemas[path] = true
+ d.visitedSchemas[path] = struct{}{}
}
// isVisited tells if a path has already been visited
@@ -75,8 +82,9 @@ func (d *defaultValidator) isVisited(path string) bool {
}
// Validate validates the default values declared in the swagger spec
-func (d *defaultValidator) Validate() (errs *Result) {
- errs = new(Result)
+func (d *defaultValidator) Validate() *Result {
+ errs := pools.poolOfResults.BorrowResult() // will redeem when merged
+
if d == nil || d.SpecValidator == nil {
return errs
}
@@ -89,7 +97,7 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result {
// every default value that is specified must validate against the schema for that property
// headers, items, parameters, schema
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult() // will redeem when merged
s := d.SpecValidator
for method, pathItem := range s.expandedAnalyzer().Operations() {
@@ -107,10 +115,12 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result {
// default values provided must validate against their inline definition (no explicit schema)
if param.Default != nil && param.Schema == nil {
// check param default value is valid
- red := NewParamValidator(¶m, s.KnownFormats).Validate(param.Default) //#nosec
+ red := newParamValidator(¶m, s.KnownFormats, d.schemaOptions).Validate(param.Default) //#nosec
if red.HasErrorsOrWarnings() {
res.AddErrors(defaultValueDoesNotValidateMsg(param.Name, param.In))
res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
}
}
@@ -120,6 +130,8 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result {
if red.HasErrorsOrWarnings() {
res.AddErrors(defaultValueItemsDoesNotValidateMsg(param.Name, param.In))
res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
}
}
@@ -129,6 +141,8 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result {
if red.HasErrorsOrWarnings() {
res.AddErrors(defaultValueDoesNotValidateMsg(param.Name, param.In))
res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
}
}
}
@@ -154,7 +168,7 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result {
// reset explored schemas to get depth-first recursive-proof exploration
d.resetVisited()
for nm, sch := range s.spec.Spec().Definitions {
- res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("definitions.%s", nm), "body", &sch)) //#nosec
+ res.Merge(d.validateDefaultValueSchemaAgainstSchema("definitions."+nm, "body", &sch)) //#nosec
}
}
return res
@@ -170,17 +184,18 @@ func (d *defaultValidator) validateDefaultInResponse(resp *spec.Response, respon
responseName, responseCodeAsStr := responseHelp.responseMsgVariants(responseType, responseCode)
- // nolint: dupl
if response.Headers != nil { // Safeguard
for nm, h := range response.Headers {
// reset explored schemas to get depth-first recursive-proof exploration
d.resetVisited()
if h.Default != nil {
- red := NewHeaderValidator(nm, &h, s.KnownFormats).Validate(h.Default) //#nosec
+ red := newHeaderValidator(nm, &h, s.KnownFormats, d.schemaOptions).Validate(h.Default) //#nosec
if red.HasErrorsOrWarnings() {
res.AddErrors(defaultValueHeaderDoesNotValidateMsg(operationID, nm, responseName))
res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
}
}
@@ -190,6 +205,8 @@ func (d *defaultValidator) validateDefaultInResponse(resp *spec.Response, respon
if red.HasErrorsOrWarnings() {
res.AddErrors(defaultValueHeaderItemsDoesNotValidateMsg(operationID, nm, responseName))
res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
}
}
@@ -209,6 +226,8 @@ func (d *defaultValidator) validateDefaultInResponse(resp *spec.Response, respon
// Additional message to make sure the context of the error is not lost
res.AddErrors(defaultValueInDoesNotValidateMsg(operationID, responseName))
res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
}
}
return res
@@ -220,11 +239,13 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri
return nil
}
d.beingVisited(path)
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
s := d.SpecValidator
if schema.Default != nil {
- res.Merge(NewSchemaValidator(schema, s.spec.Spec(), path+".default", s.KnownFormats, SwaggerSchema(true)).Validate(schema.Default))
+ res.Merge(
+ newSchemaValidator(schema, s.spec.Spec(), path+".default", s.KnownFormats, d.schemaOptions).Validate(schema.Default),
+ )
}
if schema.Items != nil {
if schema.Items.Schema != nil {
@@ -242,7 +263,7 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri
}
if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil {
// NOTE: we keep validating values, even though additionalItems is not supported by Swagger 2.0 (and 3.0 as well)
- res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalItems", path), in, schema.AdditionalItems.Schema))
+ res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+".additionalItems", in, schema.AdditionalItems.Schema))
}
for propName, prop := range schema.Properties {
res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec
@@ -251,7 +272,7 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri
res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec
}
if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil {
- res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalProperties", path), in, schema.AdditionalProperties.Schema))
+ res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+".additionalProperties", in, schema.AdditionalProperties.Schema))
}
if schema.AllOf != nil {
for i, aoSch := range schema.AllOf {
@@ -262,13 +283,15 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri
}
// TODO: Temporary duplicated code. Need to refactor with examples
-// nolint: dupl
+
func (d *defaultValidator) validateDefaultValueItemsAgainstSchema(path, in string, root interface{}, items *spec.Items) *Result {
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
s := d.SpecValidator
if items != nil {
if items.Default != nil {
- res.Merge(newItemsValidator(path, in, items, root, s.KnownFormats).Validate(0, items.Default))
+ res.Merge(
+ newItemsValidator(path, in, items, root, s.KnownFormats, d.schemaOptions).Validate(0, items.Default),
+ )
}
if items.Items != nil {
res.Merge(d.validateDefaultValueItemsAgainstSchema(path+"[0].default", in, root, items.Items))
diff --git a/test/tools/vendor/github.com/go-openapi/validate/doc.go b/test/tools/vendor/github.com/go-openapi/validate/doc.go
index f5ca9a5d580..d2b901eab9a 100644
--- a/test/tools/vendor/github.com/go-openapi/validate/doc.go
+++ b/test/tools/vendor/github.com/go-openapi/validate/doc.go
@@ -19,7 +19,7 @@ as well as tools to validate data against their schema.
This package follows Swagger 2.0. specification (aka OpenAPI 2.0). Reference
can be found here: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md.
-Validating a specification
+# Validating a specification
Validates a spec document (from JSON or YAML) against the JSON schema for swagger,
then checks a number of extra rules that can't be expressed in JSON schema.
@@ -30,34 +30,36 @@ Entry points:
- SpecValidator.Validate()
Reported as errors:
- [x] definition can't declare a property that's already defined by one of its ancestors
- [x] definition's ancestor can't be a descendant of the same model
- [x] path uniqueness: each api path should be non-verbatim (account for path param names) unique per method
- [x] each security reference should contain only unique scopes
- [x] each security scope in a security definition should be unique
- [x] parameters in path must be unique
- [x] each path parameter must correspond to a parameter placeholder and vice versa
- [x] each referenceable definition must have references
- [x] each definition property listed in the required array must be defined in the properties of the model
- [x] each parameter should have a unique `name` and `type` combination
- [x] each operation should have only 1 parameter of type body
- [x] each reference must point to a valid object
- [x] every default value that is specified must validate against the schema for that property
- [x] items property is required for all schemas/definitions of type `array`
- [x] path parameters must be declared a required
- [x] headers must not contain $ref
- [x] schema and property examples provided must validate against their respective object's schema
- [x] examples provided must validate their schema
+
+ [x] definition can't declare a property that's already defined by one of its ancestors
+ [x] definition's ancestor can't be a descendant of the same model
+ [x] path uniqueness: each api path should be non-verbatim (account for path param names) unique per method. Validation can be relaxed by disabling StrictPathParamUniqueness.
+ [x] each security reference should contain only unique scopes
+ [x] each security scope in a security definition should be unique
+ [x] parameters in path must be unique
+ [x] each path parameter must correspond to a parameter placeholder and vice versa
+ [x] each referenceable definition must have references
+ [x] each definition property listed in the required array must be defined in the properties of the model
+ [x] each parameter should have a unique `name` and `type` combination
+ [x] each operation should have only 1 parameter of type body
+ [x] each reference must point to a valid object
+ [x] every default value that is specified must validate against the schema for that property
+ [x] items property is required for all schemas/definitions of type `array`
+ [x] path parameters must be declared as required
+ [x] headers must not contain $ref
+ [x] schema and property examples provided must validate against their respective object's schema
+ [x] examples provided must validate their schema
Reported as warnings:
- [x] path parameters should not contain any of [{,},\w]
- [x] empty path
- [x] unused definitions
- [x] unsupported validation of examples on non-JSON media types
- [x] examples in response without schema
- [x] readOnly properties should not be required
-Validating a schema
+ [x] path parameters should not contain any of [{,},\w]
+ [x] empty path
+ [x] unused definitions
+ [x] unsupported validation of examples on non-JSON media types
+ [x] examples in response without schema
+ [x] readOnly properties should not be required
+
+# Validating a schema
The schema validation toolkit validates data against JSON-schema-draft 04 schema.
@@ -70,16 +72,16 @@ Entry points:
- AgainstSchema()
- ...
-Known limitations
+# Known limitations
With the current version of this package, the following aspects of swagger are not yet supported:
- [ ] errors and warnings are not reported with key/line number in spec
- [ ] default values and examples on responses only support application/json producer type
- [ ] invalid numeric constraints (such as Minimum, etc..) are not checked except for default and example values
- [ ] rules for collectionFormat are not implemented
- [ ] no validation rule for polymorphism support (discriminator) [not done here]
- [ ] valid js ECMA regexp not supported by Go regexp engine are considered invalid
- [ ] arbitrary large numbers are not supported: max is math.MaxFloat64
+ [ ] errors and warnings are not reported with key/line number in spec
+ [ ] default values and examples on responses only support application/json producer type
+ [ ] invalid numeric constraints (such as Minimum, etc..) are not checked except for default and example values
+ [ ] rules for collectionFormat are not implemented
+ [ ] no validation rule for polymorphism support (discriminator) [not done here]
+ [ ] valid js ECMA regexp not supported by Go regexp engine are considered invalid
+ [ ] arbitrary large numbers are not supported: max is math.MaxFloat64
*/
package validate
diff --git a/test/tools/vendor/github.com/go-openapi/validate/example_validator.go b/test/tools/vendor/github.com/go-openapi/validate/example_validator.go
index c8bffd78e5a..d08956973ce 100644
--- a/test/tools/vendor/github.com/go-openapi/validate/example_validator.go
+++ b/test/tools/vendor/github.com/go-openapi/validate/example_validator.go
@@ -23,17 +23,27 @@ import (
// ExampleValidator validates example values defined in a spec
type exampleValidator struct {
SpecValidator *SpecValidator
- visitedSchemas map[string]bool
+ visitedSchemas map[string]struct{}
+ schemaOptions *SchemaValidatorOptions
}
// resetVisited resets the internal state of visited schemas
func (ex *exampleValidator) resetVisited() {
- ex.visitedSchemas = map[string]bool{}
+ if ex.visitedSchemas == nil {
+ ex.visitedSchemas = make(map[string]struct{})
+
+ return
+ }
+
+ // TODO(go1.21): clear(ex.visitedSchemas)
+ for k := range ex.visitedSchemas {
+ delete(ex.visitedSchemas, k)
+ }
}
// beingVisited asserts a schema is being visited
func (ex *exampleValidator) beingVisited(path string) {
- ex.visitedSchemas[path] = true
+ ex.visitedSchemas[path] = struct{}{}
}
// isVisited tells if a path has already been visited
@@ -48,9 +58,9 @@ func (ex *exampleValidator) isVisited(path string) bool {
// - schemas
// - individual property
// - responses
-//
-func (ex *exampleValidator) Validate() (errs *Result) {
- errs = new(Result)
+func (ex *exampleValidator) Validate() *Result {
+ errs := pools.poolOfResults.BorrowResult()
+
if ex == nil || ex.SpecValidator == nil {
return errs
}
@@ -65,7 +75,7 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result {
// in: schemas, properties, object, items
// not in: headers, parameters without schema
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
s := ex.SpecValidator
for method, pathItem := range s.expandedAnalyzer().Operations() {
@@ -83,10 +93,12 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result {
// default values provided must validate against their inline definition (no explicit schema)
if param.Example != nil && param.Schema == nil {
// check param default value is valid
- red := NewParamValidator(¶m, s.KnownFormats).Validate(param.Example) //#nosec
+ red := newParamValidator(¶m, s.KnownFormats, ex.schemaOptions).Validate(param.Example) //#nosec
if red.HasErrorsOrWarnings() {
res.AddWarnings(exampleValueDoesNotValidateMsg(param.Name, param.In))
res.MergeAsWarnings(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
}
}
@@ -96,6 +108,8 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result {
if red.HasErrorsOrWarnings() {
res.AddWarnings(exampleValueItemsDoesNotValidateMsg(param.Name, param.In))
res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
}
}
@@ -105,6 +119,8 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result {
if red.HasErrorsOrWarnings() {
res.AddWarnings(exampleValueDoesNotValidateMsg(param.Name, param.In))
res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
}
}
}
@@ -130,7 +146,7 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result {
// reset explored schemas to get depth-first recursive-proof exploration
ex.resetVisited()
for nm, sch := range s.spec.Spec().Definitions {
- res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("definitions.%s", nm), "body", &sch)) //#nosec
+ res.Merge(ex.validateExampleValueSchemaAgainstSchema("definitions."+nm, "body", &sch)) //#nosec
}
}
return res
@@ -146,17 +162,18 @@ func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, respo
responseName, responseCodeAsStr := responseHelp.responseMsgVariants(responseType, responseCode)
- // nolint: dupl
if response.Headers != nil { // Safeguard
for nm, h := range response.Headers {
// reset explored schemas to get depth-first recursive-proof exploration
ex.resetVisited()
if h.Example != nil {
- red := NewHeaderValidator(nm, &h, s.KnownFormats).Validate(h.Example) //#nosec
+ red := newHeaderValidator(nm, &h, s.KnownFormats, ex.schemaOptions).Validate(h.Example) //#nosec
if red.HasErrorsOrWarnings() {
res.AddWarnings(exampleValueHeaderDoesNotValidateMsg(operationID, nm, responseName))
res.MergeAsWarnings(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
}
}
@@ -166,6 +183,8 @@ func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, respo
if red.HasErrorsOrWarnings() {
res.AddWarnings(exampleValueHeaderItemsDoesNotValidateMsg(operationID, nm, responseName))
res.MergeAsWarnings(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
}
}
@@ -185,13 +204,17 @@ func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, respo
// Additional message to make sure the context of the error is not lost
res.AddWarnings(exampleValueInDoesNotValidateMsg(operationID, responseName))
res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
}
}
if response.Examples != nil {
if response.Schema != nil {
if example, ok := response.Examples["application/json"]; ok {
- res.MergeAsWarnings(NewSchemaValidator(response.Schema, s.spec.Spec(), path+".examples", s.KnownFormats, SwaggerSchema(true)).Validate(example))
+ res.MergeAsWarnings(
+ newSchemaValidator(response.Schema, s.spec.Spec(), path+".examples", s.KnownFormats, s.schemaOptions).Validate(example),
+ )
} else {
// TODO: validate other media types too
res.AddWarnings(examplesMimeNotSupportedMsg(operationID, responseName))
@@ -210,10 +233,12 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str
}
ex.beingVisited(path)
s := ex.SpecValidator
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
if schema.Example != nil {
- res.MergeAsWarnings(NewSchemaValidator(schema, s.spec.Spec(), path+".example", s.KnownFormats, SwaggerSchema(true)).Validate(schema.Example))
+ res.MergeAsWarnings(
+ newSchemaValidator(schema, s.spec.Spec(), path+".example", s.KnownFormats, ex.schemaOptions).Validate(schema.Example),
+ )
}
if schema.Items != nil {
if schema.Items.Schema != nil {
@@ -231,7 +256,7 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str
}
if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil {
// NOTE: we keep validating values, even though additionalItems is unsupported in Swagger 2.0 (and 3.0 as well)
- res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalItems", path), in, schema.AdditionalItems.Schema))
+ res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+".additionalItems", in, schema.AdditionalItems.Schema))
}
for propName, prop := range schema.Properties {
res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec
@@ -240,7 +265,7 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str
res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec
}
if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil {
- res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalProperties", path), in, schema.AdditionalProperties.Schema))
+ res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+".additionalProperties", in, schema.AdditionalProperties.Schema))
}
if schema.AllOf != nil {
for i, aoSch := range schema.AllOf {
@@ -251,13 +276,16 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str
}
// TODO: Temporary duplicated code. Need to refactor with examples
-// nolint: dupl
+//
+
func (ex *exampleValidator) validateExampleValueItemsAgainstSchema(path, in string, root interface{}, items *spec.Items) *Result {
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
s := ex.SpecValidator
if items != nil {
if items.Example != nil {
- res.MergeAsWarnings(newItemsValidator(path, in, items, root, s.KnownFormats).Validate(0, items.Example))
+ res.MergeAsWarnings(
+ newItemsValidator(path, in, items, root, s.KnownFormats, ex.schemaOptions).Validate(0, items.Example),
+ )
}
if items.Items != nil {
res.Merge(ex.validateExampleValueItemsAgainstSchema(path+"[0].example", in, root, items.Items))
@@ -266,5 +294,6 @@ func (ex *exampleValidator) validateExampleValueItemsAgainstSchema(path, in stri
res.AddErrors(invalidPatternInMsg(path, in, items.Pattern))
}
}
+
return res
}
diff --git a/test/tools/vendor/github.com/go-openapi/validate/formats.go b/test/tools/vendor/github.com/go-openapi/validate/formats.go
index 0ad996cbbc2..f4e35521306 100644
--- a/test/tools/vendor/github.com/go-openapi/validate/formats.go
+++ b/test/tools/vendor/github.com/go-openapi/validate/formats.go
@@ -22,10 +22,32 @@ import (
)
type formatValidator struct {
- Format string
Path string
In string
+ Format string
KnownFormats strfmt.Registry
+ Options *SchemaValidatorOptions
+}
+
+func newFormatValidator(path, in, format string, formats strfmt.Registry, opts *SchemaValidatorOptions) *formatValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var f *formatValidator
+ if opts.recycleValidators {
+ f = pools.poolOfFormatValidators.BorrowValidator()
+ } else {
+ f = new(formatValidator)
+ }
+
+ f.Path = path
+ f.In = in
+ f.Format = format
+ f.KnownFormats = formats
+ f.Options = opts
+
+ return f
}
func (f *formatValidator) SetPath(path string) {
@@ -33,37 +55,45 @@ func (f *formatValidator) SetPath(path string) {
}
func (f *formatValidator) Applies(source interface{}, kind reflect.Kind) bool {
- doit := func() bool {
- if source == nil {
- return false
- }
- switch source := source.(type) {
- case *spec.Items:
- return kind == reflect.String && f.KnownFormats.ContainsName(source.Format)
- case *spec.Parameter:
- return kind == reflect.String && f.KnownFormats.ContainsName(source.Format)
- case *spec.Schema:
- return kind == reflect.String && f.KnownFormats.ContainsName(source.Format)
- case *spec.Header:
- return kind == reflect.String && f.KnownFormats.ContainsName(source.Format)
- }
+ if source == nil || f.KnownFormats == nil {
+ return false
+ }
+
+ switch source := source.(type) {
+ case *spec.Items:
+ return kind == reflect.String && f.KnownFormats.ContainsName(source.Format)
+ case *spec.Parameter:
+ return kind == reflect.String && f.KnownFormats.ContainsName(source.Format)
+ case *spec.Schema:
+ return kind == reflect.String && f.KnownFormats.ContainsName(source.Format)
+ case *spec.Header:
+ return kind == reflect.String && f.KnownFormats.ContainsName(source.Format)
+ default:
return false
}
- r := doit()
- debugLog("format validator for %q applies %t for %T (kind: %v)\n", f.Path, r, source, kind)
- return r
}
func (f *formatValidator) Validate(val interface{}) *Result {
- result := new(Result)
- debugLog("validating \"%v\" against format: %s", val, f.Format)
+ if f.Options.recycleValidators {
+ defer func() {
+ f.redeem()
+ }()
+ }
+
+ var result *Result
+ if f.Options.recycleResult {
+ result = pools.poolOfResults.BorrowResult()
+ } else {
+ result = new(Result)
+ }
if err := FormatOf(f.Path, f.In, f.Format, val.(string), f.KnownFormats); err != nil {
result.AddErrors(err)
}
- if result.HasErrors() {
- return result
- }
- return nil
+ return result
+}
+
+func (f *formatValidator) redeem() {
+ pools.poolOfFormatValidators.RedeemValidator(f)
}
diff --git a/test/tools/vendor/github.com/go-openapi/validate/helpers.go b/test/tools/vendor/github.com/go-openapi/validate/helpers.go
index 48ebfab58e5..757e403d912 100644
--- a/test/tools/vendor/github.com/go-openapi/validate/helpers.go
+++ b/test/tools/vendor/github.com/go-openapi/validate/helpers.go
@@ -101,9 +101,17 @@ type errorHelper struct {
// A collection of unexported helpers for error construction
}
-func (h *errorHelper) sErr(err errors.Error) *Result {
+func (h *errorHelper) sErr(err errors.Error, recycle bool) *Result {
// Builds a Result from standard errors.Error
- return &Result{Errors: []error{err}}
+ var result *Result
+ if recycle {
+ result = pools.poolOfResults.BorrowResult()
+ } else {
+ result = new(Result)
+ }
+ result.Errors = []error{err}
+
+ return result
}
func (h *errorHelper) addPointerError(res *Result, err error, ref string, fromPath string) *Result {
@@ -157,7 +165,7 @@ func (h *valueHelper) asInt64(val interface{}) int64 {
// Number conversion function for int64, without error checking
// (implements an implicit type upgrade).
v := reflect.ValueOf(val)
- switch v.Kind() {
+ switch v.Kind() { //nolint:exhaustive
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
@@ -174,7 +182,7 @@ func (h *valueHelper) asUint64(val interface{}) uint64 {
// Number conversion function for uint64, without error checking
// (implements an implicit type upgrade).
v := reflect.ValueOf(val)
- switch v.Kind() {
+ switch v.Kind() { //nolint:exhaustive
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return uint64(v.Int())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
@@ -192,7 +200,7 @@ func (h *valueHelper) asFloat64(val interface{}) float64 {
// Number conversion function for float64, without error checking
// (implements an implicit type upgrade).
v := reflect.ValueOf(val)
- switch v.Kind() {
+ switch v.Kind() { //nolint:exhaustive
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return float64(v.Int())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
@@ -225,7 +233,7 @@ func (h *paramHelper) safeExpandedParamsFor(path, method, operationID string, re
operation.Parameters = resolvedParams
for _, ppr := range s.expandedAnalyzer().SafeParamsFor(method, path,
- func(p spec.Parameter, err error) bool {
+ func(_ spec.Parameter, err error) bool {
// since params have already been expanded, there are few causes for error
res.AddErrors(someParametersBrokenMsg(path, method, operationID))
// original error from analyzer
@@ -250,7 +258,7 @@ func (h *paramHelper) resolveParam(path, method, operationID string, param *spec
}
if err != nil { // Safeguard
- // NOTE: we may enter enter here when the whole parameter is an unresolved $ref
+ // NOTE: we may enter here when the whole parameter is an unresolved $ref
refPath := strings.Join([]string{"\"" + path + "\"", method}, ".")
errorHelp.addPointerError(res, err, param.Ref.String(), refPath)
return nil, res
@@ -306,6 +314,7 @@ func (r *responseHelper) expandResponseRef(
errorHelp.addPointerError(res, err, response.Ref.String(), path)
return nil, res
}
+
return response, res
}
diff --git a/test/tools/vendor/github.com/go-openapi/validate/object_validator.go b/test/tools/vendor/github.com/go-openapi/validate/object_validator.go
index 7bb12615d8e..dff73fa98a1 100644
--- a/test/tools/vendor/github.com/go-openapi/validate/object_validator.go
+++ b/test/tools/vendor/github.com/go-openapi/validate/object_validator.go
@@ -15,8 +15,8 @@
package validate
import (
+ "fmt"
"reflect"
- "regexp"
"strings"
"github.com/go-openapi/errors"
@@ -35,62 +35,116 @@ type objectValidator struct {
PatternProperties map[string]spec.Schema
Root interface{}
KnownFormats strfmt.Registry
- Options SchemaValidatorOptions
+ Options *SchemaValidatorOptions
+ splitPath []string
+}
+
+func newObjectValidator(path, in string,
+ maxProperties, minProperties *int64, required []string, properties spec.SchemaProperties,
+ additionalProperties *spec.SchemaOrBool, patternProperties spec.SchemaProperties,
+ root interface{}, formats strfmt.Registry, opts *SchemaValidatorOptions) *objectValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var v *objectValidator
+ if opts.recycleValidators {
+ v = pools.poolOfObjectValidators.BorrowValidator()
+ } else {
+ v = new(objectValidator)
+ }
+
+ v.Path = path
+ v.In = in
+ v.MaxProperties = maxProperties
+ v.MinProperties = minProperties
+ v.Required = required
+ v.Properties = properties
+ v.AdditionalProperties = additionalProperties
+ v.PatternProperties = patternProperties
+ v.Root = root
+ v.KnownFormats = formats
+ v.Options = opts
+ v.splitPath = strings.Split(v.Path, ".")
+
+ return v
}
func (o *objectValidator) SetPath(path string) {
o.Path = path
+ o.splitPath = strings.Split(path, ".")
}
func (o *objectValidator) Applies(source interface{}, kind reflect.Kind) bool {
// TODO: this should also work for structs
// there is a problem in the type validator where it will be unhappy about null values
// so that requires more testing
- r := reflect.TypeOf(source) == specSchemaType && (kind == reflect.Map || kind == reflect.Struct)
- debugLog("object validator for %q applies %t for %T (kind: %v)\n", o.Path, r, source, kind)
- return r
+ _, isSchema := source.(*spec.Schema)
+ return isSchema && (kind == reflect.Map || kind == reflect.Struct)
}
func (o *objectValidator) isProperties() bool {
- p := strings.Split(o.Path, ".")
+ p := o.splitPath
return len(p) > 1 && p[len(p)-1] == jsonProperties && p[len(p)-2] != jsonProperties
}
func (o *objectValidator) isDefault() bool {
- p := strings.Split(o.Path, ".")
+ p := o.splitPath
return len(p) > 1 && p[len(p)-1] == jsonDefault && p[len(p)-2] != jsonDefault
}
func (o *objectValidator) isExample() bool {
- p := strings.Split(o.Path, ".")
+ p := o.splitPath
return len(p) > 1 && (p[len(p)-1] == swaggerExample || p[len(p)-1] == swaggerExamples) && p[len(p)-2] != swaggerExample
}
func (o *objectValidator) checkArrayMustHaveItems(res *Result, val map[string]interface{}) {
// for swagger 2.0 schemas, there is an additional constraint to have array items defined explicitly.
// with pure jsonschema draft 4, one may have arrays with undefined items (i.e. any type).
- if t, typeFound := val[jsonType]; typeFound {
- if tpe, ok := t.(string); ok && tpe == arrayType {
- if item, itemsKeyFound := val[jsonItems]; !itemsKeyFound {
- res.AddErrors(errors.Required(jsonItems, o.Path, item))
- }
- }
+ if val == nil {
+ return
+ }
+
+ t, typeFound := val[jsonType]
+ if !typeFound {
+ return
+ }
+
+ tpe, isString := t.(string)
+ if !isString || tpe != arrayType {
+ return
+ }
+
+ item, itemsKeyFound := val[jsonItems]
+ if itemsKeyFound {
+ return
}
+
+ res.AddErrors(errors.Required(jsonItems, o.Path, item))
}
func (o *objectValidator) checkItemsMustBeTypeArray(res *Result, val map[string]interface{}) {
- if !o.isProperties() && !o.isDefault() && !o.isExample() {
- if _, itemsKeyFound := val[jsonItems]; itemsKeyFound {
- t, typeFound := val[jsonType]
- if typeFound {
- if tpe, ok := t.(string); !ok || tpe != arrayType {
- res.AddErrors(errors.InvalidType(o.Path, o.In, arrayType, nil))
- }
- } else {
- // there is no type
- res.AddErrors(errors.Required(jsonType, o.Path, t))
- }
- }
+ if val == nil {
+ return
+ }
+
+ if o.isProperties() || o.isDefault() || o.isExample() {
+ return
+ }
+
+ _, itemsKeyFound := val[jsonItems]
+ if !itemsKeyFound {
+ return
+ }
+
+ t, typeFound := val[jsonType]
+ if !typeFound {
+ // there is no type
+ res.AddErrors(errors.Required(jsonType, o.Path, t))
+ }
+
+ if tpe, isString := t.(string); !isString || tpe != arrayType {
+ res.AddErrors(errors.InvalidType(o.Path, o.In, arrayType, nil))
}
}
@@ -104,176 +158,274 @@ func (o *objectValidator) precheck(res *Result, val map[string]interface{}) {
}
func (o *objectValidator) Validate(data interface{}) *Result {
- val := data.(map[string]interface{})
- // TODO: guard against nil data
+ if o.Options.recycleValidators {
+ defer func() {
+ o.redeem()
+ }()
+ }
+
+ var val map[string]interface{}
+ if data != nil {
+ var ok bool
+ val, ok = data.(map[string]interface{})
+ if !ok {
+ return errorHelp.sErr(invalidObjectMsg(o.Path, o.In), o.Options.recycleResult)
+ }
+ }
numKeys := int64(len(val))
if o.MinProperties != nil && numKeys < *o.MinProperties {
- return errorHelp.sErr(errors.TooFewProperties(o.Path, o.In, *o.MinProperties))
+ return errorHelp.sErr(errors.TooFewProperties(o.Path, o.In, *o.MinProperties), o.Options.recycleResult)
}
if o.MaxProperties != nil && numKeys > *o.MaxProperties {
- return errorHelp.sErr(errors.TooManyProperties(o.Path, o.In, *o.MaxProperties))
+ return errorHelp.sErr(errors.TooManyProperties(o.Path, o.In, *o.MaxProperties), o.Options.recycleResult)
}
- res := new(Result)
+ var res *Result
+ if o.Options.recycleResult {
+ res = pools.poolOfResults.BorrowResult()
+ } else {
+ res = new(Result)
+ }
o.precheck(res, val)
// check validity of field names
if o.AdditionalProperties != nil && !o.AdditionalProperties.Allows {
// Case: additionalProperties: false
- for k := range val {
- _, regularProperty := o.Properties[k]
- matched := false
-
- for pk := range o.PatternProperties {
- if matches, _ := regexp.MatchString(pk, k); matches {
- matched = true
- break
- }
+ o.validateNoAdditionalProperties(val, res)
+ } else {
+ // Cases: empty additionalProperties (implying: true), or additionalProperties: true, or additionalProperties: { <> }
+ o.validateAdditionalProperties(val, res)
+ }
+
+ o.validatePropertiesSchema(val, res)
+
+ // Check patternProperties
+ // TODO: it looks like we have done that twice in many cases
+ for key, value := range val {
+ _, regularProperty := o.Properties[key]
+ matched, _, patterns := o.validatePatternProperty(key, value, res) // applies to regular properties as well
+ if regularProperty || !matched {
+ continue
+ }
+
+ for _, pName := range patterns {
+ if v, ok := o.PatternProperties[pName]; ok {
+ r := newSchemaValidator(&v, o.Root, o.Path+"."+key, o.KnownFormats, o.Options).Validate(value)
+ res.mergeForField(data.(map[string]interface{}), key, r)
}
+ }
+ }
- if !regularProperty && k != "$schema" && k != "id" && !matched {
- // Special properties "$schema" and "id" are ignored
- res.AddErrors(errors.PropertyNotAllowed(o.Path, o.In, k))
-
- // BUG(fredbi): This section should move to a part dedicated to spec validation as
- // it will conflict with regular schemas where a property "headers" is defined.
-
- //
- // Croaks a more explicit message on top of the standard one
- // on some recognized cases.
- //
- // NOTE: edge cases with invalid type assertion are simply ignored here.
- // NOTE: prefix your messages here by "IMPORTANT!" so there are not filtered
- // by higher level callers (the IMPORTANT! tag will be eventually
- // removed).
- if k == "headers" && val[k] != nil {
- // $ref is forbidden in header
- if headers, mapOk := val[k].(map[string]interface{}); mapOk {
- for headerKey, headerBody := range headers {
- if headerBody != nil {
- if headerSchema, mapOfMapOk := headerBody.(map[string]interface{}); mapOfMapOk {
- if _, found := headerSchema["$ref"]; found {
- var msg string
- if refString, stringOk := headerSchema["$ref"].(string); stringOk {
- msg = strings.Join([]string{", one may not use $ref=\":", refString, "\""}, "")
- }
- res.AddErrors(refNotAllowedInHeaderMsg(o.Path, headerKey, msg))
- }
- }
- }
- }
- }
- /*
- case "$ref":
- if val[k] != nil {
- // TODO: check context of that ref: warn about siblings, check against invalid context
- }
- */
- }
+ return res
+}
+
+func (o *objectValidator) validateNoAdditionalProperties(val map[string]interface{}, res *Result) {
+ for k := range val {
+ if k == "$schema" || k == "id" {
+ // special properties "$schema" and "id" are ignored
+ continue
+ }
+
+ _, regularProperty := o.Properties[k]
+ if regularProperty {
+ continue
+ }
+
+ matched := false
+ for pk := range o.PatternProperties {
+ re, err := compileRegexp(pk)
+ if err != nil {
+ continue
+ }
+ if matches := re.MatchString(k); matches {
+ matched = true
+ break
}
}
- } else {
- // Cases: no additionalProperties (implying: true), or additionalProperties: true, or additionalProperties: { <> }
- for key, value := range val {
- _, regularProperty := o.Properties[key]
-
- // Validates property against "patternProperties" if applicable
- // BUG(fredbi): succeededOnce is always false
-
- // NOTE: how about regular properties which do not match patternProperties?
- matched, succeededOnce, _ := o.validatePatternProperty(key, value, res)
-
- if !(regularProperty || matched || succeededOnce) {
-
- // Cases: properties which are not regular properties and have not been matched by the PatternProperties validator
- if o.AdditionalProperties != nil && o.AdditionalProperties.Schema != nil {
- // AdditionalProperties as Schema
- r := NewSchemaValidator(o.AdditionalProperties.Schema, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...).Validate(value)
- res.mergeForField(data.(map[string]interface{}), key, r)
- } else if regularProperty && !(matched || succeededOnce) {
- // TODO: this is dead code since regularProperty=false here
- res.AddErrors(errors.FailedAllPatternProperties(o.Path, o.In, key))
- }
+ if matched {
+ continue
+ }
+
+ res.AddErrors(errors.PropertyNotAllowed(o.Path, o.In, k))
+
+ // BUG(fredbi): This section should move to a part dedicated to spec validation as
+ // it will conflict with regular schemas where a property "headers" is defined.
+
+ //
+ // Croaks a more explicit message on top of the standard one
+ // on some recognized cases.
+ //
+ // NOTE: edge cases with invalid type assertion are simply ignored here.
+ // NOTE: prefix your messages here by "IMPORTANT!" so there are not filtered
+ // by higher level callers (the IMPORTANT! tag will be eventually
+ // removed).
+ if k != "headers" || val[k] == nil {
+ continue
+ }
+
+ // $ref is forbidden in header
+ headers, mapOk := val[k].(map[string]interface{})
+ if !mapOk {
+ continue
+ }
+
+ for headerKey, headerBody := range headers {
+ if headerBody == nil {
+ continue
+ }
+
+ headerSchema, mapOfMapOk := headerBody.(map[string]interface{})
+ if !mapOfMapOk {
+ continue
+ }
+
+ _, found := headerSchema["$ref"]
+ if !found {
+ continue
+ }
+
+ refString, stringOk := headerSchema["$ref"].(string)
+ if !stringOk {
+ continue
}
+
+ msg := strings.Join([]string{", one may not use $ref=\":", refString, "\""}, "")
+ res.AddErrors(refNotAllowedInHeaderMsg(o.Path, headerKey, msg))
+ /*
+ case "$ref":
+ if val[k] != nil {
+ // TODO: check context of that ref: warn about siblings, check against invalid context
+ }
+ */
+ }
+ }
+}
+
+func (o *objectValidator) validateAdditionalProperties(val map[string]interface{}, res *Result) {
+ for key, value := range val {
+ _, regularProperty := o.Properties[key]
+ if regularProperty {
+ continue
+ }
+
+ // Validates property against "patternProperties" if applicable
+ // BUG(fredbi): succeededOnce is always false
+
+ // NOTE: how about regular properties which do not match patternProperties?
+ matched, succeededOnce, _ := o.validatePatternProperty(key, value, res)
+ if matched || succeededOnce {
+ continue
+ }
+
+ if o.AdditionalProperties == nil || o.AdditionalProperties.Schema == nil {
+ continue
}
- // Valid cases: additionalProperties: true or undefined
+
+ // Cases: properties which are not regular properties and have not been matched by the PatternProperties validator
+ // AdditionalProperties as Schema
+ r := newSchemaValidator(o.AdditionalProperties.Schema, o.Root, o.Path+"."+key, o.KnownFormats, o.Options).Validate(value)
+ res.mergeForField(val, key, r)
}
+ // Valid cases: additionalProperties: true or undefined
+}
- createdFromDefaults := map[string]bool{}
+func (o *objectValidator) validatePropertiesSchema(val map[string]interface{}, res *Result) {
+ createdFromDefaults := map[string]struct{}{}
// Property types:
// - regular Property
+ pSchema := pools.poolOfSchemas.BorrowSchema() // recycle a spec.Schema object which lifespan extends only to the validation of properties
+ defer func() {
+ pools.poolOfSchemas.RedeemSchema(pSchema)
+ }()
+
for pName := range o.Properties {
- pSchema := o.Properties[pName] // one instance per iteration
- rName := pName
- if o.Path != "" {
+ *pSchema = o.Properties[pName]
+ var rName string
+ if o.Path == "" {
+ rName = pName
+ } else {
rName = o.Path + "." + pName
}
// Recursively validates each property against its schema
- if v, ok := val[pName]; ok {
- r := NewSchemaValidator(&pSchema, o.Root, rName, o.KnownFormats, o.Options.Options()...).Validate(v)
- res.mergeForField(data.(map[string]interface{}), pName, r)
- } else if pSchema.Default != nil {
- // If a default value is defined, creates the property from defaults
- // NOTE: JSON schema does not enforce default values to be valid against schema. Swagger does.
- createdFromDefaults[pName] = true
- res.addPropertySchemata(data.(map[string]interface{}), pName, &pSchema)
+ v, ok := val[pName]
+ if ok {
+ r := newSchemaValidator(pSchema, o.Root, rName, o.KnownFormats, o.Options).Validate(v)
+ res.mergeForField(val, pName, r)
+
+ continue
}
- }
- // Check required properties
- if len(o.Required) > 0 {
- for _, k := range o.Required {
- if v, ok := val[k]; !ok && !createdFromDefaults[k] {
- res.AddErrors(errors.Required(o.Path+"."+k, o.In, v))
- continue
+ if pSchema.Default != nil {
+ // if a default value is defined, creates the property from defaults
+ // NOTE: JSON schema does not enforce default values to be valid against schema. Swagger does.
+ createdFromDefaults[pName] = struct{}{}
+ if !o.Options.skipSchemataResult {
+ res.addPropertySchemata(val, pName, pSchema) // this shallow-clones the content of the pSchema pointer
}
}
}
- // Check patternProperties
- // TODO: it looks like we have done that twice in many cases
- for key, value := range val {
- _, regularProperty := o.Properties[key]
- matched, _ /*succeededOnce*/, patterns := o.validatePatternProperty(key, value, res)
- if !regularProperty && (matched /*|| succeededOnce*/) {
- for _, pName := range patterns {
- if v, ok := o.PatternProperties[pName]; ok {
- r := NewSchemaValidator(&v, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...).Validate(value)
- res.mergeForField(data.(map[string]interface{}), key, r)
- }
- }
+ if len(o.Required) == 0 {
+ return
+ }
+
+ // Check required properties
+ for _, k := range o.Required {
+ v, ok := val[k]
+ if ok {
+ continue
+ }
+ _, isCreatedFromDefaults := createdFromDefaults[k]
+ if isCreatedFromDefaults {
+ continue
}
+
+ res.AddErrors(errors.Required(fmt.Sprintf("%s.%s", o.Path, k), o.In, v))
}
- return res
}
// TODO: succeededOnce is not used anywhere
func (o *objectValidator) validatePatternProperty(key string, value interface{}, result *Result) (bool, bool, []string) {
+ if len(o.PatternProperties) == 0 {
+ return false, false, nil
+ }
+
matched := false
succeededOnce := false
- var patterns []string
+ patterns := make([]string, 0, len(o.PatternProperties))
- for k, schema := range o.PatternProperties {
- sch := schema
- if match, _ := regexp.MatchString(k, key); match {
- patterns = append(patterns, k)
- matched = true
- validator := NewSchemaValidator(&sch, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...)
+ schema := pools.poolOfSchemas.BorrowSchema()
+ defer func() {
+ pools.poolOfSchemas.RedeemSchema(schema)
+ }()
- res := validator.Validate(value)
- result.Merge(res)
+ for k := range o.PatternProperties {
+ re, err := compileRegexp(k)
+ if err != nil {
+ continue
}
- }
- // BUG(fredbi): can't get to here. Should remove dead code (commented out).
+ match := re.MatchString(key)
+ if !match {
+ continue
+ }
- // if succeededOnce {
- // result.Inc()
- // }
+ *schema = o.PatternProperties[k]
+ patterns = append(patterns, k)
+ matched = true
+ validator := newSchemaValidator(schema, o.Root, fmt.Sprintf("%s.%s", o.Path, key), o.KnownFormats, o.Options)
+
+ res := validator.Validate(value)
+ result.Merge(res)
+ }
return matched, succeededOnce, patterns
}
+
+func (o *objectValidator) redeem() {
+ pools.poolOfObjectValidators.RedeemValidator(o)
+}
diff --git a/test/tools/vendor/github.com/go-openapi/validate/options.go b/test/tools/vendor/github.com/go-openapi/validate/options.go
index deeec2f2ecc..cfe9b0660f6 100644
--- a/test/tools/vendor/github.com/go-openapi/validate/options.go
+++ b/test/tools/vendor/github.com/go-openapi/validate/options.go
@@ -21,10 +21,29 @@ import "sync"
// NOTE: other options might be needed, for example a go-swagger specific mode.
type Opts struct {
ContinueOnErrors bool // true: continue reporting errors, even if spec is invalid
+
+ // StrictPathParamUniqueness enables a strict validation of paths that include
+ // path parameters. When true, it will enforce that for each method, the path
+ // is unique, regardless of path parameters such that GET:/petstore/{id} and
+ // GET:/petstore/{pet} are considered duplicate paths.
+ //
+ // Consider disabling if path parameters can include slashes such as
+ // GET:/v1/{shelve} and GET:/v1/{book}, where the IDs are "shelve/*" and
+ // "shelve/*/book/*" respectively.
+ StrictPathParamUniqueness bool
+ SkipSchemataResult bool
}
var (
- defaultOpts = Opts{ContinueOnErrors: false} // default is to stop validation on errors
+ defaultOpts = Opts{
+ // default is to stop validation on errors
+ ContinueOnErrors: false,
+
+ // StrictPathParamUniqueness is defaulted to true. This maintains existing
+ // behavior.
+ StrictPathParamUniqueness: true,
+ }
+
defaultOptsMutex = &sync.Mutex{}
)
diff --git a/test/tools/vendor/github.com/go-openapi/validate/pools.go b/test/tools/vendor/github.com/go-openapi/validate/pools.go
new file mode 100644
index 00000000000..3ddce4dcc2b
--- /dev/null
+++ b/test/tools/vendor/github.com/go-openapi/validate/pools.go
@@ -0,0 +1,366 @@
+//go:build !validatedebug
+
+package validate
+
+import (
+ "sync"
+
+ "github.com/go-openapi/spec"
+)
+
+var pools allPools
+
+func init() {
+ resetPools()
+}
+
+func resetPools() {
+ // NOTE: for testing purpose, we might want to reset pools after calling Validate twice.
+ // The pool is corrupted in that case: calling Put twice inserts a duplicate in the pool
+ // and further calls to Get are mishandled.
+
+ pools = allPools{
+ poolOfSchemaValidators: schemaValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &SchemaValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfObjectValidators: objectValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &objectValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfSliceValidators: sliceValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &schemaSliceValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfItemsValidators: itemsValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &itemsValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfBasicCommonValidators: basicCommonValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &basicCommonValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfHeaderValidators: headerValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &HeaderValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfParamValidators: paramValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &ParamValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfBasicSliceValidators: basicSliceValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &basicSliceValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfNumberValidators: numberValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &numberValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfStringValidators: stringValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &stringValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfSchemaPropsValidators: schemaPropsValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &schemaPropsValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfFormatValidators: formatValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &formatValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfTypeValidators: typeValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &typeValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfSchemas: schemasPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &spec.Schema{}
+
+ return s
+ },
+ },
+ },
+ poolOfResults: resultsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &Result{}
+
+ return s
+ },
+ },
+ },
+ }
+}
+
+type (
+ allPools struct {
+ // memory pools for all validator objects.
+ //
+ // Each pool can be borrowed from and redeemed to.
+ poolOfSchemaValidators schemaValidatorsPool
+ poolOfObjectValidators objectValidatorsPool
+ poolOfSliceValidators sliceValidatorsPool
+ poolOfItemsValidators itemsValidatorsPool
+ poolOfBasicCommonValidators basicCommonValidatorsPool
+ poolOfHeaderValidators headerValidatorsPool
+ poolOfParamValidators paramValidatorsPool
+ poolOfBasicSliceValidators basicSliceValidatorsPool
+ poolOfNumberValidators numberValidatorsPool
+ poolOfStringValidators stringValidatorsPool
+ poolOfSchemaPropsValidators schemaPropsValidatorsPool
+ poolOfFormatValidators formatValidatorsPool
+ poolOfTypeValidators typeValidatorsPool
+ poolOfSchemas schemasPool
+ poolOfResults resultsPool
+ }
+
+ schemaValidatorsPool struct {
+ *sync.Pool
+ }
+
+ objectValidatorsPool struct {
+ *sync.Pool
+ }
+
+ sliceValidatorsPool struct {
+ *sync.Pool
+ }
+
+ itemsValidatorsPool struct {
+ *sync.Pool
+ }
+
+ basicCommonValidatorsPool struct {
+ *sync.Pool
+ }
+
+ headerValidatorsPool struct {
+ *sync.Pool
+ }
+
+ paramValidatorsPool struct {
+ *sync.Pool
+ }
+
+ basicSliceValidatorsPool struct {
+ *sync.Pool
+ }
+
+ numberValidatorsPool struct {
+ *sync.Pool
+ }
+
+ stringValidatorsPool struct {
+ *sync.Pool
+ }
+
+ schemaPropsValidatorsPool struct {
+ *sync.Pool
+ }
+
+ formatValidatorsPool struct {
+ *sync.Pool
+ }
+
+ typeValidatorsPool struct {
+ *sync.Pool
+ }
+
+ schemasPool struct {
+ *sync.Pool
+ }
+
+ resultsPool struct {
+ *sync.Pool
+ }
+)
+
+func (p schemaValidatorsPool) BorrowValidator() *SchemaValidator {
+ return p.Get().(*SchemaValidator)
+}
+
+func (p schemaValidatorsPool) RedeemValidator(s *SchemaValidator) {
+ // NOTE: s might be nil. In that case, Put is a noop.
+ p.Put(s)
+}
+
+func (p objectValidatorsPool) BorrowValidator() *objectValidator {
+ return p.Get().(*objectValidator)
+}
+
+func (p objectValidatorsPool) RedeemValidator(s *objectValidator) {
+ p.Put(s)
+}
+
+func (p sliceValidatorsPool) BorrowValidator() *schemaSliceValidator {
+ return p.Get().(*schemaSliceValidator)
+}
+
+func (p sliceValidatorsPool) RedeemValidator(s *schemaSliceValidator) {
+ p.Put(s)
+}
+
+func (p itemsValidatorsPool) BorrowValidator() *itemsValidator {
+ return p.Get().(*itemsValidator)
+}
+
+func (p itemsValidatorsPool) RedeemValidator(s *itemsValidator) {
+ p.Put(s)
+}
+
+func (p basicCommonValidatorsPool) BorrowValidator() *basicCommonValidator {
+ return p.Get().(*basicCommonValidator)
+}
+
+func (p basicCommonValidatorsPool) RedeemValidator(s *basicCommonValidator) {
+ p.Put(s)
+}
+
+func (p headerValidatorsPool) BorrowValidator() *HeaderValidator {
+ return p.Get().(*HeaderValidator)
+}
+
+func (p headerValidatorsPool) RedeemValidator(s *HeaderValidator) {
+ p.Put(s)
+}
+
+func (p paramValidatorsPool) BorrowValidator() *ParamValidator {
+ return p.Get().(*ParamValidator)
+}
+
+func (p paramValidatorsPool) RedeemValidator(s *ParamValidator) {
+ p.Put(s)
+}
+
+func (p basicSliceValidatorsPool) BorrowValidator() *basicSliceValidator {
+ return p.Get().(*basicSliceValidator)
+}
+
+func (p basicSliceValidatorsPool) RedeemValidator(s *basicSliceValidator) {
+ p.Put(s)
+}
+
+func (p numberValidatorsPool) BorrowValidator() *numberValidator {
+ return p.Get().(*numberValidator)
+}
+
+func (p numberValidatorsPool) RedeemValidator(s *numberValidator) {
+ p.Put(s)
+}
+
+func (p stringValidatorsPool) BorrowValidator() *stringValidator {
+ return p.Get().(*stringValidator)
+}
+
+func (p stringValidatorsPool) RedeemValidator(s *stringValidator) {
+ p.Put(s)
+}
+
+func (p schemaPropsValidatorsPool) BorrowValidator() *schemaPropsValidator {
+ return p.Get().(*schemaPropsValidator)
+}
+
+func (p schemaPropsValidatorsPool) RedeemValidator(s *schemaPropsValidator) {
+ p.Put(s)
+}
+
+func (p formatValidatorsPool) BorrowValidator() *formatValidator {
+ return p.Get().(*formatValidator)
+}
+
+func (p formatValidatorsPool) RedeemValidator(s *formatValidator) {
+ p.Put(s)
+}
+
+func (p typeValidatorsPool) BorrowValidator() *typeValidator {
+ return p.Get().(*typeValidator)
+}
+
+func (p typeValidatorsPool) RedeemValidator(s *typeValidator) {
+ p.Put(s)
+}
+
+func (p schemasPool) BorrowSchema() *spec.Schema {
+ return p.Get().(*spec.Schema)
+}
+
+func (p schemasPool) RedeemSchema(s *spec.Schema) {
+ p.Put(s)
+}
+
+func (p resultsPool) BorrowResult() *Result {
+ return p.Get().(*Result).cleared()
+}
+
+func (p resultsPool) RedeemResult(s *Result) {
+ if s == emptyResult {
+ return
+ }
+ p.Put(s)
+}
diff --git a/test/tools/vendor/github.com/go-openapi/validate/pools_debug.go b/test/tools/vendor/github.com/go-openapi/validate/pools_debug.go
new file mode 100644
index 00000000000..12949f02a7e
--- /dev/null
+++ b/test/tools/vendor/github.com/go-openapi/validate/pools_debug.go
@@ -0,0 +1,1012 @@
+//go:build validatedebug
+
+package validate
+
+import (
+ "fmt"
+ "runtime"
+ "sync"
+ "testing"
+
+ "github.com/go-openapi/spec"
+)
+
+// This version of the pools is to be used for debugging and testing, with build tag "validatedebug".
+//
+// In this mode, the pools are tracked for allocation and redemption of borrowed objects, so we can
+// verify a few behaviors of the validators. The debug pools panic when an invalid usage pattern is detected.
+
+var pools allPools
+
+func init() {
+ resetPools()
+}
+
+func resetPools() {
+ // NOTE: for testing purpose, we might want to reset pools after calling Validate twice.
+ // The pool is corrupted in that case: calling Put twice inserts a duplicate in the pool
+ // and further calls to Get are mishandled.
+
+ pools = allPools{
+ poolOfSchemaValidators: schemaValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &SchemaValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*SchemaValidator]status),
+ allocMap: make(map[*SchemaValidator]string),
+ redeemMap: make(map[*SchemaValidator]string),
+ },
+ poolOfObjectValidators: objectValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &objectValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*objectValidator]status),
+ allocMap: make(map[*objectValidator]string),
+ redeemMap: make(map[*objectValidator]string),
+ },
+ poolOfSliceValidators: sliceValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &schemaSliceValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*schemaSliceValidator]status),
+ allocMap: make(map[*schemaSliceValidator]string),
+ redeemMap: make(map[*schemaSliceValidator]string),
+ },
+ poolOfItemsValidators: itemsValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &itemsValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*itemsValidator]status),
+ allocMap: make(map[*itemsValidator]string),
+ redeemMap: make(map[*itemsValidator]string),
+ },
+ poolOfBasicCommonValidators: basicCommonValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &basicCommonValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*basicCommonValidator]status),
+ allocMap: make(map[*basicCommonValidator]string),
+ redeemMap: make(map[*basicCommonValidator]string),
+ },
+ poolOfHeaderValidators: headerValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &HeaderValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*HeaderValidator]status),
+ allocMap: make(map[*HeaderValidator]string),
+ redeemMap: make(map[*HeaderValidator]string),
+ },
+ poolOfParamValidators: paramValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &ParamValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*ParamValidator]status),
+ allocMap: make(map[*ParamValidator]string),
+ redeemMap: make(map[*ParamValidator]string),
+ },
+ poolOfBasicSliceValidators: basicSliceValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &basicSliceValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*basicSliceValidator]status),
+ allocMap: make(map[*basicSliceValidator]string),
+ redeemMap: make(map[*basicSliceValidator]string),
+ },
+ poolOfNumberValidators: numberValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &numberValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*numberValidator]status),
+ allocMap: make(map[*numberValidator]string),
+ redeemMap: make(map[*numberValidator]string),
+ },
+ poolOfStringValidators: stringValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &stringValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*stringValidator]status),
+ allocMap: make(map[*stringValidator]string),
+ redeemMap: make(map[*stringValidator]string),
+ },
+ poolOfSchemaPropsValidators: schemaPropsValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &schemaPropsValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*schemaPropsValidator]status),
+ allocMap: make(map[*schemaPropsValidator]string),
+ redeemMap: make(map[*schemaPropsValidator]string),
+ },
+ poolOfFormatValidators: formatValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &formatValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*formatValidator]status),
+ allocMap: make(map[*formatValidator]string),
+ redeemMap: make(map[*formatValidator]string),
+ },
+ poolOfTypeValidators: typeValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &typeValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*typeValidator]status),
+ allocMap: make(map[*typeValidator]string),
+ redeemMap: make(map[*typeValidator]string),
+ },
+ poolOfSchemas: schemasPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &spec.Schema{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*spec.Schema]status),
+ allocMap: make(map[*spec.Schema]string),
+ redeemMap: make(map[*spec.Schema]string),
+ },
+ poolOfResults: resultsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &Result{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*Result]status),
+ allocMap: make(map[*Result]string),
+ redeemMap: make(map[*Result]string),
+ },
+ }
+}
+
+const (
+ statusFresh status = iota + 1
+ statusRecycled
+ statusRedeemed
+)
+
+func (s status) String() string {
+ switch s {
+ case statusFresh:
+ return "fresh"
+ case statusRecycled:
+ return "recycled"
+ case statusRedeemed:
+ return "redeemed"
+ default:
+ panic(fmt.Errorf("invalid status: %d", s))
+ }
+}
+
+type (
+ // Debug
+ status uint8
+
+ allPools struct {
+ // memory pools for all validator objects.
+ //
+ // Each pool can be borrowed from and redeemed to.
+ poolOfSchemaValidators schemaValidatorsPool
+ poolOfObjectValidators objectValidatorsPool
+ poolOfSliceValidators sliceValidatorsPool
+ poolOfItemsValidators itemsValidatorsPool
+ poolOfBasicCommonValidators basicCommonValidatorsPool
+ poolOfHeaderValidators headerValidatorsPool
+ poolOfParamValidators paramValidatorsPool
+ poolOfBasicSliceValidators basicSliceValidatorsPool
+ poolOfNumberValidators numberValidatorsPool
+ poolOfStringValidators stringValidatorsPool
+ poolOfSchemaPropsValidators schemaPropsValidatorsPool
+ poolOfFormatValidators formatValidatorsPool
+ poolOfTypeValidators typeValidatorsPool
+ poolOfSchemas schemasPool
+ poolOfResults resultsPool
+ }
+
+ schemaValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*SchemaValidator]status
+ allocMap map[*SchemaValidator]string
+ redeemMap map[*SchemaValidator]string
+ mx sync.Mutex
+ }
+
+ objectValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*objectValidator]status
+ allocMap map[*objectValidator]string
+ redeemMap map[*objectValidator]string
+ mx sync.Mutex
+ }
+
+ sliceValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*schemaSliceValidator]status
+ allocMap map[*schemaSliceValidator]string
+ redeemMap map[*schemaSliceValidator]string
+ mx sync.Mutex
+ }
+
+ itemsValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*itemsValidator]status
+ allocMap map[*itemsValidator]string
+ redeemMap map[*itemsValidator]string
+ mx sync.Mutex
+ }
+
+ basicCommonValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*basicCommonValidator]status
+ allocMap map[*basicCommonValidator]string
+ redeemMap map[*basicCommonValidator]string
+ mx sync.Mutex
+ }
+
+ headerValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*HeaderValidator]status
+ allocMap map[*HeaderValidator]string
+ redeemMap map[*HeaderValidator]string
+ mx sync.Mutex
+ }
+
+ paramValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*ParamValidator]status
+ allocMap map[*ParamValidator]string
+ redeemMap map[*ParamValidator]string
+ mx sync.Mutex
+ }
+
+ basicSliceValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*basicSliceValidator]status
+ allocMap map[*basicSliceValidator]string
+ redeemMap map[*basicSliceValidator]string
+ mx sync.Mutex
+ }
+
+ numberValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*numberValidator]status
+ allocMap map[*numberValidator]string
+ redeemMap map[*numberValidator]string
+ mx sync.Mutex
+ }
+
+ stringValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*stringValidator]status
+ allocMap map[*stringValidator]string
+ redeemMap map[*stringValidator]string
+ mx sync.Mutex
+ }
+
+ schemaPropsValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*schemaPropsValidator]status
+ allocMap map[*schemaPropsValidator]string
+ redeemMap map[*schemaPropsValidator]string
+ mx sync.Mutex
+ }
+
+ formatValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*formatValidator]status
+ allocMap map[*formatValidator]string
+ redeemMap map[*formatValidator]string
+ mx sync.Mutex
+ }
+
+ typeValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*typeValidator]status
+ allocMap map[*typeValidator]string
+ redeemMap map[*typeValidator]string
+ mx sync.Mutex
+ }
+
+ schemasPool struct {
+ *sync.Pool
+ debugMap map[*spec.Schema]status
+ allocMap map[*spec.Schema]string
+ redeemMap map[*spec.Schema]string
+ mx sync.Mutex
+ }
+
+ resultsPool struct {
+ *sync.Pool
+ debugMap map[*Result]status
+ allocMap map[*Result]string
+ redeemMap map[*Result]string
+ mx sync.Mutex
+ }
+)
+
+func (p *schemaValidatorsPool) BorrowValidator() *SchemaValidator {
+ s := p.Get().(*SchemaValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled schema should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *schemaValidatorsPool) RedeemValidator(s *SchemaValidator) {
+ // NOTE: s might be nil. In that case, Put is a noop.
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed schema should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed schema should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *objectValidatorsPool) BorrowValidator() *objectValidator {
+ s := p.Get().(*objectValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled object should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *objectValidatorsPool) RedeemValidator(s *objectValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed object should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed object should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *sliceValidatorsPool) BorrowValidator() *schemaSliceValidator {
+ s := p.Get().(*schemaSliceValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled schemaSliceValidator should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *sliceValidatorsPool) RedeemValidator(s *schemaSliceValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed schemaSliceValidator should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed schemaSliceValidator should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *itemsValidatorsPool) BorrowValidator() *itemsValidator {
+ s := p.Get().(*itemsValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled itemsValidator should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *itemsValidatorsPool) RedeemValidator(s *itemsValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed itemsValidator should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed itemsValidator should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *basicCommonValidatorsPool) BorrowValidator() *basicCommonValidator {
+ s := p.Get().(*basicCommonValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled basicCommonValidator should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *basicCommonValidatorsPool) RedeemValidator(s *basicCommonValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed basicCommonValidator should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed basicCommonValidator should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *headerValidatorsPool) BorrowValidator() *HeaderValidator {
+ s := p.Get().(*HeaderValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled HeaderValidator should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *headerValidatorsPool) RedeemValidator(s *HeaderValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed header should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed header should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *paramValidatorsPool) BorrowValidator() *ParamValidator {
+ s := p.Get().(*ParamValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled param should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *paramValidatorsPool) RedeemValidator(s *ParamValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed param should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed param should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *basicSliceValidatorsPool) BorrowValidator() *basicSliceValidator {
+ s := p.Get().(*basicSliceValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled basicSliceValidator should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *basicSliceValidatorsPool) RedeemValidator(s *basicSliceValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed basicSliceValidator should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed basicSliceValidator should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *numberValidatorsPool) BorrowValidator() *numberValidator {
+ s := p.Get().(*numberValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled number should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *numberValidatorsPool) RedeemValidator(s *numberValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed number should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed number should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *stringValidatorsPool) BorrowValidator() *stringValidator {
+ s := p.Get().(*stringValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled string should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *stringValidatorsPool) RedeemValidator(s *stringValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed string should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed string should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *schemaPropsValidatorsPool) BorrowValidator() *schemaPropsValidator {
+ s := p.Get().(*schemaPropsValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled param should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *schemaPropsValidatorsPool) RedeemValidator(s *schemaPropsValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed schemaProps should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed schemaProps should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *formatValidatorsPool) BorrowValidator() *formatValidator {
+ s := p.Get().(*formatValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled format should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *formatValidatorsPool) RedeemValidator(s *formatValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed format should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed format should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *typeValidatorsPool) BorrowValidator() *typeValidator {
+ s := p.Get().(*typeValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled type should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *typeValidatorsPool) RedeemValidator(s *typeValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed type should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic(fmt.Errorf("redeemed type should have been allocated from a fresh or recycled pointer. Got status %s, already redeamed at: %s", x, p.redeemMap[s]))
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *schemasPool) BorrowSchema() *spec.Schema {
+ s := p.Get().(*spec.Schema)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled spec.Schema should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *schemasPool) RedeemSchema(s *spec.Schema) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed spec.Schema should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed spec.Schema should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *resultsPool) BorrowResult() *Result {
+ s := p.Get().(*Result).cleared()
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled result should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *resultsPool) RedeemResult(s *Result) {
+ if s == emptyResult {
+ if len(s.Errors) > 0 || len(s.Warnings) > 0 {
+ panic("empty result should not mutate")
+ }
+ return
+ }
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed Result should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed Result should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *allPools) allIsRedeemed(t testing.TB) bool {
+ outcome := true
+ for k, v := range p.poolOfSchemaValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("schemaValidator should be redeemed. Allocated by: %s", p.poolOfSchemaValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfObjectValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("objectValidator should be redeemed. Allocated by: %s", p.poolOfObjectValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfSliceValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("sliceValidator should be redeemed. Allocated by: %s", p.poolOfSliceValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfItemsValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("itemsValidator should be redeemed. Allocated by: %s", p.poolOfItemsValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfBasicCommonValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("basicCommonValidator should be redeemed. Allocated by: %s", p.poolOfBasicCommonValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfHeaderValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("headerValidator should be redeemed. Allocated by: %s", p.poolOfHeaderValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfParamValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("paramValidator should be redeemed. Allocated by: %s", p.poolOfParamValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfBasicSliceValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("basicSliceValidator should be redeemed. Allocated by: %s", p.poolOfBasicSliceValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfNumberValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("numberValidator should be redeemed. Allocated by: %s", p.poolOfNumberValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfStringValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("stringValidator should be redeemed. Allocated by: %s", p.poolOfStringValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfSchemaPropsValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("schemaPropsValidator should be redeemed. Allocated by: %s", p.poolOfSchemaPropsValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfFormatValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("formatValidator should be redeemed. Allocated by: %s", p.poolOfFormatValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfTypeValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("typeValidator should be redeemed. Allocated by: %s", p.poolOfTypeValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfSchemas.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("schemas should be redeemed. Allocated by: %s", p.poolOfSchemas.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfResults.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("result should be redeemed. Allocated by: %s", p.poolOfResults.allocMap[k])
+ outcome = false
+ }
+
+ return outcome
+}
+
+func caller() string {
+ pc, _, _, _ := runtime.Caller(3) //nolint:dogsled
+ from, line := runtime.FuncForPC(pc).FileLine(pc)
+
+ return fmt.Sprintf("%s:%d", from, line)
+}
diff --git a/test/tools/vendor/github.com/go-openapi/validate/result.go b/test/tools/vendor/github.com/go-openapi/validate/result.go
index 8f5f935e5d1..c80804a93d0 100644
--- a/test/tools/vendor/github.com/go-openapi/validate/result.go
+++ b/test/tools/vendor/github.com/go-openapi/validate/result.go
@@ -15,7 +15,7 @@
package validate
import (
- "fmt"
+ stderrors "errors"
"reflect"
"strings"
@@ -23,6 +23,8 @@ import (
"github.com/go-openapi/spec"
)
+var emptyResult = &Result{MatchCount: 1}
+
// Result represents a validation result set, composed of
// errors and warnings.
//
@@ -50,8 +52,10 @@ type Result struct {
// Schemata for slice items
itemSchemata []itemSchemata
- cachedFieldSchemta map[FieldKey][]*spec.Schema
- cachedItemSchemata map[ItemKey][]*spec.Schema
+ cachedFieldSchemata map[FieldKey][]*spec.Schema
+ cachedItemSchemata map[ItemKey][]*spec.Schema
+
+ wantsRedeemOnMerge bool
}
// FieldKey is a pair of an object and a field, usable as a key for a map.
@@ -116,6 +120,9 @@ func (r *Result) Merge(others ...*Result) *Result {
}
r.mergeWithoutRootSchemata(other)
r.rootObjectSchemata.Append(other.rootObjectSchemata)
+ if other.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(other)
+ }
}
return r
}
@@ -132,10 +139,9 @@ func (r *Result) RootObjectSchemata() []*spec.Schema {
}
// FieldSchemata returns the schemata which apply to fields in objects.
-// nolint: dupl
func (r *Result) FieldSchemata() map[FieldKey][]*spec.Schema {
- if r.cachedFieldSchemta != nil {
- return r.cachedFieldSchemta
+ if r.cachedFieldSchemata != nil {
+ return r.cachedFieldSchemata
}
ret := make(map[FieldKey][]*spec.Schema, len(r.fieldSchemata))
@@ -147,12 +153,12 @@ func (r *Result) FieldSchemata() map[FieldKey][]*spec.Schema {
ret[key] = append(ret[key], fs.schemata.multiple...)
}
}
- r.cachedFieldSchemta = ret
+ r.cachedFieldSchemata = ret
+
return ret
}
// ItemSchemata returns the schemata which apply to items in slices.
-// nolint: dupl
func (r *Result) ItemSchemata() map[ItemKey][]*spec.Schema {
if r.cachedItemSchemata != nil {
return r.cachedItemSchemata
@@ -172,12 +178,13 @@ func (r *Result) ItemSchemata() map[ItemKey][]*spec.Schema {
}
func (r *Result) resetCaches() {
- r.cachedFieldSchemta = nil
+ r.cachedFieldSchemata = nil
r.cachedItemSchemata = nil
}
// mergeForField merges other into r, assigning other's root schemata to the given Object and field name.
-// nolint: unparam
+//
+//nolint:unparam
func (r *Result) mergeForField(obj map[string]interface{}, field string, other *Result) *Result {
if other == nil {
return r
@@ -188,18 +195,23 @@ func (r *Result) mergeForField(obj map[string]interface{}, field string, other *
if r.fieldSchemata == nil {
r.fieldSchemata = make([]fieldSchemata, len(obj))
}
+ // clone other schemata, as other is about to be redeemed to the pool
r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{
obj: obj,
field: field,
- schemata: other.rootObjectSchemata,
+ schemata: other.rootObjectSchemata.Clone(),
})
}
+ if other.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(other)
+ }
return r
}
// mergeForSlice merges other into r, assigning other's root schemata to the given slice and index.
-// nolint: unparam
+//
+//nolint:unparam
func (r *Result) mergeForSlice(slice reflect.Value, i int, other *Result) *Result {
if other == nil {
return r
@@ -210,29 +222,38 @@ func (r *Result) mergeForSlice(slice reflect.Value, i int, other *Result) *Resul
if r.itemSchemata == nil {
r.itemSchemata = make([]itemSchemata, slice.Len())
}
+ // clone other schemata, as other is about to be redeemed to the pool
r.itemSchemata = append(r.itemSchemata, itemSchemata{
slice: slice,
index: i,
- schemata: other.rootObjectSchemata,
+ schemata: other.rootObjectSchemata.Clone(),
})
}
+ if other.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(other)
+ }
+
return r
}
// addRootObjectSchemata adds the given schemata for the root object of the result.
-// The slice schemata might be reused. I.e. do not modify it after being added to a result.
+//
+// Since the slice schemata might be reused, it is shallow-cloned before saving it into the result.
func (r *Result) addRootObjectSchemata(s *spec.Schema) {
- r.rootObjectSchemata.Append(schemata{one: s})
+ clone := *s
+ r.rootObjectSchemata.Append(schemata{one: &clone})
}
// addPropertySchemata adds the given schemata for the object and field.
-// The slice schemata might be reused. I.e. do not modify it after being added to a result.
+//
+// Since the slice schemata might be reused, it is shallow-cloned before saving it into the result.
func (r *Result) addPropertySchemata(obj map[string]interface{}, fld string, schema *spec.Schema) {
if r.fieldSchemata == nil {
r.fieldSchemata = make([]fieldSchemata, 0, len(obj))
}
- r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{obj: obj, field: fld, schemata: schemata{one: schema}})
+ clone := *schema
+ r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{obj: obj, field: fld, schemata: schemata{one: &clone}})
}
/*
@@ -255,17 +276,21 @@ func (r *Result) mergeWithoutRootSchemata(other *Result) {
if other.fieldSchemata != nil {
if r.fieldSchemata == nil {
- r.fieldSchemata = other.fieldSchemata
- } else {
- r.fieldSchemata = append(r.fieldSchemata, other.fieldSchemata...)
+ r.fieldSchemata = make([]fieldSchemata, 0, len(other.fieldSchemata))
+ }
+ for _, field := range other.fieldSchemata {
+ field.schemata = field.schemata.Clone()
+ r.fieldSchemata = append(r.fieldSchemata, field)
}
}
if other.itemSchemata != nil {
if r.itemSchemata == nil {
- r.itemSchemata = other.itemSchemata
- } else {
- r.itemSchemata = append(r.itemSchemata, other.itemSchemata...)
+ r.itemSchemata = make([]itemSchemata, 0, len(other.itemSchemata))
+ }
+ for _, field := range other.itemSchemata {
+ field.schemata = field.schemata.Clone()
+ r.itemSchemata = append(r.itemSchemata, field)
}
}
}
@@ -280,6 +305,9 @@ func (r *Result) MergeAsErrors(others ...*Result) *Result {
r.AddErrors(other.Errors...)
r.AddErrors(other.Warnings...)
r.MatchCount += other.MatchCount
+ if other.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(other)
+ }
}
}
return r
@@ -295,6 +323,9 @@ func (r *Result) MergeAsWarnings(others ...*Result) *Result {
r.AddWarnings(other.Errors...)
r.AddWarnings(other.Warnings...)
r.MatchCount += other.MatchCount
+ if other.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(other)
+ }
}
}
return r
@@ -356,16 +387,21 @@ func (r *Result) keepRelevantErrors() *Result {
strippedErrors := []error{}
for _, e := range r.Errors {
if strings.HasPrefix(e.Error(), "IMPORTANT!") {
- strippedErrors = append(strippedErrors, fmt.Errorf(strings.TrimPrefix(e.Error(), "IMPORTANT!")))
+ strippedErrors = append(strippedErrors, stderrors.New(strings.TrimPrefix(e.Error(), "IMPORTANT!")))
}
}
strippedWarnings := []error{}
for _, e := range r.Warnings {
if strings.HasPrefix(e.Error(), "IMPORTANT!") {
- strippedWarnings = append(strippedWarnings, fmt.Errorf(strings.TrimPrefix(e.Error(), "IMPORTANT!")))
+ strippedWarnings = append(strippedWarnings, stderrors.New(strings.TrimPrefix(e.Error(), "IMPORTANT!")))
}
}
- strippedResult := new(Result)
+ var strippedResult *Result
+ if r.wantsRedeemOnMerge {
+ strippedResult = pools.poolOfResults.BorrowResult()
+ } else {
+ strippedResult = new(Result)
+ }
strippedResult.Errors = strippedErrors
strippedResult.Warnings = strippedWarnings
return strippedResult
@@ -427,6 +463,27 @@ func (r *Result) AsError() error {
return errors.CompositeValidationError(r.Errors...)
}
+func (r *Result) cleared() *Result {
+ // clear the Result to be reusable. Keep allocated capacity.
+ r.Errors = r.Errors[:0]
+ r.Warnings = r.Warnings[:0]
+ r.MatchCount = 0
+ r.data = nil
+ r.rootObjectSchemata.one = nil
+ r.rootObjectSchemata.multiple = r.rootObjectSchemata.multiple[:0]
+ r.fieldSchemata = r.fieldSchemata[:0]
+ r.itemSchemata = r.itemSchemata[:0]
+ for k := range r.cachedFieldSchemata {
+ delete(r.cachedFieldSchemata, k)
+ }
+ for k := range r.cachedItemSchemata {
+ delete(r.cachedItemSchemata, k)
+ }
+ r.wantsRedeemOnMerge = true // mark this result as eligible for redeem when merged into another
+
+ return r
+}
+
// schemata is an arbitrary number of schemata. It does a distinction between zero,
// one and many schemata to avoid slice allocations.
type schemata struct {
@@ -453,7 +510,7 @@ func (s *schemata) Slice() []*spec.Schema {
return s.multiple
}
-// appendSchemata appends the schemata in other to s. It mutated s in-place.
+// appendSchemata appends the schemata in other to s. It mutates s in-place.
func (s *schemata) Append(other schemata) {
if other.one == nil && len(other.multiple) == 0 {
return
@@ -484,3 +541,23 @@ func (s *schemata) Append(other schemata) {
}
}
}
+
+func (s schemata) Clone() schemata {
+ var clone schemata
+
+ if s.one != nil {
+ clone.one = new(spec.Schema)
+ *clone.one = *s.one
+ }
+
+ if len(s.multiple) > 0 {
+ clone.multiple = make([]*spec.Schema, len(s.multiple))
+ for idx := 0; idx < len(s.multiple); idx++ {
+ sp := new(spec.Schema)
+ *sp = *s.multiple[idx]
+ clone.multiple[idx] = sp
+ }
+ }
+
+ return clone
+}
diff --git a/test/tools/vendor/github.com/go-openapi/validate/schema.go b/test/tools/vendor/github.com/go-openapi/validate/schema.go
index b817eb0ef30..db65264fd10 100644
--- a/test/tools/vendor/github.com/go-openapi/validate/schema.go
+++ b/test/tools/vendor/github.com/go-openapi/validate/schema.go
@@ -24,32 +24,32 @@ import (
"github.com/go-openapi/swag"
)
-var (
- specSchemaType = reflect.TypeOf(&spec.Schema{})
- specParameterType = reflect.TypeOf(&spec.Parameter{})
- specHeaderType = reflect.TypeOf(&spec.Header{})
- // specItemsType = reflect.TypeOf(&spec.Items{})
-)
-
// SchemaValidator validates data against a JSON schema
type SchemaValidator struct {
Path string
in string
Schema *spec.Schema
- validators []valueValidator
+ validators [8]valueValidator
Root interface{}
KnownFormats strfmt.Registry
- Options SchemaValidatorOptions
+ Options *SchemaValidatorOptions
}
// AgainstSchema validates the specified data against the provided schema, using a registry of supported formats.
//
// When no pre-parsed *spec.Schema structure is provided, it uses a JSON schema as default. See example.
func AgainstSchema(schema *spec.Schema, data interface{}, formats strfmt.Registry, options ...Option) error {
- res := NewSchemaValidator(schema, nil, "", formats, options...).Validate(data)
+ res := NewSchemaValidator(schema, nil, "", formats,
+ append(options, WithRecycleValidators(true), withRecycleResults(true))...,
+ ).Validate(data)
+ defer func() {
+ pools.poolOfResults.RedeemResult(res)
+ }()
+
if res.HasErrors() {
return errors.CompositeValidationError(res.Errors...)
}
+
return nil
}
@@ -57,6 +57,15 @@ func AgainstSchema(schema *spec.Schema, data interface{}, formats strfmt.Registr
//
// Panics if the provided schema is invalid.
func NewSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, options ...Option) *SchemaValidator {
+ opts := new(SchemaValidatorOptions)
+ for _, o := range options {
+ o(opts)
+ }
+
+ return newSchemaValidator(schema, rootSchema, root, formats, opts)
+}
+
+func newSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, opts *SchemaValidatorOptions) *SchemaValidator {
if schema == nil {
return nil
}
@@ -72,17 +81,26 @@ func NewSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string
panic(msg)
}
}
- s := SchemaValidator{
- Path: root,
- in: "body",
- Schema: schema,
- Root: rootSchema,
- KnownFormats: formats,
- Options: SchemaValidatorOptions{}}
- for _, o := range options {
- o(&s.Options)
+
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
}
- s.validators = []valueValidator{
+
+ var s *SchemaValidator
+ if opts.recycleValidators {
+ s = pools.poolOfSchemaValidators.BorrowValidator()
+ } else {
+ s = new(SchemaValidator)
+ }
+
+ s.Path = root
+ s.in = "body"
+ s.Schema = schema
+ s.Root = rootSchema
+ s.Options = opts
+ s.KnownFormats = formats
+
+ s.validators = [8]valueValidator{
s.typeValidator(),
s.schemaPropsValidator(),
s.stringValidator(),
@@ -92,7 +110,8 @@ func NewSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string
s.commonValidator(),
s.objectValidator(),
}
- return &s
+
+ return s
}
// SetPath sets the path for this schema valdiator
@@ -101,24 +120,46 @@ func (s *SchemaValidator) SetPath(path string) {
}
// Applies returns true when this schema validator applies
-func (s *SchemaValidator) Applies(source interface{}, kind reflect.Kind) bool {
+func (s *SchemaValidator) Applies(source interface{}, _ reflect.Kind) bool {
_, ok := source.(*spec.Schema)
return ok
}
// Validate validates the data against the schema
func (s *SchemaValidator) Validate(data interface{}) *Result {
- result := &Result{data: data}
if s == nil {
- return result
+ return emptyResult
}
- if s.Schema != nil {
+
+ if s.Options.recycleValidators {
+ defer func() {
+ s.redeemChildren()
+ s.redeem() // one-time use validator
+ }()
+ }
+
+ var result *Result
+ if s.Options.recycleResult {
+ result = pools.poolOfResults.BorrowResult()
+ result.data = data
+ } else {
+ result = &Result{data: data}
+ }
+
+ if s.Schema != nil && !s.Options.skipSchemataResult {
result.addRootObjectSchemata(s.Schema)
}
if data == nil {
+ // early exit with minimal validation
result.Merge(s.validators[0].Validate(data)) // type validator
result.Merge(s.validators[6].Validate(data)) // common validator
+
+ if s.Options.recycleValidators {
+ s.validators[0] = nil
+ s.validators[6] = nil
+ }
+
return result
}
@@ -147,6 +188,7 @@ func (s *SchemaValidator) Validate(data interface{}) *Result {
if erri != nil {
result.AddErrors(invalidTypeConversionMsg(s.Path, erri))
result.Inc()
+
return result
}
d = in
@@ -155,6 +197,7 @@ func (s *SchemaValidator) Validate(data interface{}) *Result {
if errf != nil {
result.AddErrors(invalidTypeConversionMsg(s.Path, errf))
result.Inc()
+
return result
}
d = nf
@@ -164,14 +207,26 @@ func (s *SchemaValidator) Validate(data interface{}) *Result {
kind = tpe.Kind()
}
- for _, v := range s.validators {
+ for idx, v := range s.validators {
if !v.Applies(s.Schema, kind) {
- debugLog("%T does not apply for %v", v, kind)
+ if s.Options.recycleValidators {
+ // Validate won't be called, so relinquish this validator
+ if redeemableChildren, ok := v.(interface{ redeemChildren() }); ok {
+ redeemableChildren.redeemChildren()
+ }
+ if redeemable, ok := v.(interface{ redeem() }); ok {
+ redeemable.redeem()
+ }
+ s.validators[idx] = nil // prevents further (unsafe) usage
+ }
+
continue
}
- err := v.Validate(d)
- result.Merge(err)
+ result.Merge(v.Validate(d))
+ if s.Options.recycleValidators {
+ s.validators[idx] = nil // prevents further (unsafe) usage
+ }
result.Inc()
}
result.Inc()
@@ -180,81 +235,120 @@ func (s *SchemaValidator) Validate(data interface{}) *Result {
}
func (s *SchemaValidator) typeValidator() valueValidator {
- return &typeValidator{Type: s.Schema.Type, Nullable: s.Schema.Nullable, Format: s.Schema.Format, In: s.in, Path: s.Path}
+ return newTypeValidator(
+ s.Path,
+ s.in,
+ s.Schema.Type,
+ s.Schema.Nullable,
+ s.Schema.Format,
+ s.Options,
+ )
}
func (s *SchemaValidator) commonValidator() valueValidator {
- return &basicCommonValidator{
- Path: s.Path,
- In: s.in,
- Enum: s.Schema.Enum,
- }
+ return newBasicCommonValidator(
+ s.Path,
+ s.in,
+ s.Schema.Default,
+ s.Schema.Enum,
+ s.Options,
+ )
}
func (s *SchemaValidator) sliceValidator() valueValidator {
- return &schemaSliceValidator{
- Path: s.Path,
- In: s.in,
- MaxItems: s.Schema.MaxItems,
- MinItems: s.Schema.MinItems,
- UniqueItems: s.Schema.UniqueItems,
- AdditionalItems: s.Schema.AdditionalItems,
- Items: s.Schema.Items,
- Root: s.Root,
- KnownFormats: s.KnownFormats,
- Options: s.Options,
- }
+ return newSliceValidator(
+ s.Path,
+ s.in,
+ s.Schema.MaxItems,
+ s.Schema.MinItems,
+ s.Schema.UniqueItems,
+ s.Schema.AdditionalItems,
+ s.Schema.Items,
+ s.Root,
+ s.KnownFormats,
+ s.Options,
+ )
}
func (s *SchemaValidator) numberValidator() valueValidator {
- return &numberValidator{
- Path: s.Path,
- In: s.in,
- Default: s.Schema.Default,
- MultipleOf: s.Schema.MultipleOf,
- Maximum: s.Schema.Maximum,
- ExclusiveMaximum: s.Schema.ExclusiveMaximum,
- Minimum: s.Schema.Minimum,
- ExclusiveMinimum: s.Schema.ExclusiveMinimum,
- }
+ return newNumberValidator(
+ s.Path,
+ s.in,
+ s.Schema.Default,
+ s.Schema.MultipleOf,
+ s.Schema.Maximum,
+ s.Schema.ExclusiveMaximum,
+ s.Schema.Minimum,
+ s.Schema.ExclusiveMinimum,
+ "",
+ "",
+ s.Options,
+ )
}
func (s *SchemaValidator) stringValidator() valueValidator {
- return &stringValidator{
- Path: s.Path,
- In: s.in,
- MaxLength: s.Schema.MaxLength,
- MinLength: s.Schema.MinLength,
- Pattern: s.Schema.Pattern,
- }
+ return newStringValidator(
+ s.Path,
+ s.in,
+ nil,
+ false,
+ false,
+ s.Schema.MaxLength,
+ s.Schema.MinLength,
+ s.Schema.Pattern,
+ s.Options,
+ )
}
func (s *SchemaValidator) formatValidator() valueValidator {
- return &formatValidator{
- Path: s.Path,
- In: s.in,
- Format: s.Schema.Format,
- KnownFormats: s.KnownFormats,
- }
+ return newFormatValidator(
+ s.Path,
+ s.in,
+ s.Schema.Format,
+ s.KnownFormats,
+ s.Options,
+ )
}
func (s *SchemaValidator) schemaPropsValidator() valueValidator {
sch := s.Schema
- return newSchemaPropsValidator(s.Path, s.in, sch.AllOf, sch.OneOf, sch.AnyOf, sch.Not, sch.Dependencies, s.Root, s.KnownFormats, s.Options.Options()...)
+ return newSchemaPropsValidator(
+ s.Path, s.in, sch.AllOf, sch.OneOf, sch.AnyOf, sch.Not, sch.Dependencies, s.Root, s.KnownFormats,
+ s.Options,
+ )
}
func (s *SchemaValidator) objectValidator() valueValidator {
- return &objectValidator{
- Path: s.Path,
- In: s.in,
- MaxProperties: s.Schema.MaxProperties,
- MinProperties: s.Schema.MinProperties,
- Required: s.Schema.Required,
- Properties: s.Schema.Properties,
- AdditionalProperties: s.Schema.AdditionalProperties,
- PatternProperties: s.Schema.PatternProperties,
- Root: s.Root,
- KnownFormats: s.KnownFormats,
- Options: s.Options,
+ return newObjectValidator(
+ s.Path,
+ s.in,
+ s.Schema.MaxProperties,
+ s.Schema.MinProperties,
+ s.Schema.Required,
+ s.Schema.Properties,
+ s.Schema.AdditionalProperties,
+ s.Schema.PatternProperties,
+ s.Root,
+ s.KnownFormats,
+ s.Options,
+ )
+}
+
+func (s *SchemaValidator) redeem() {
+ pools.poolOfSchemaValidators.RedeemValidator(s)
+}
+
+func (s *SchemaValidator) redeemChildren() {
+ for i, validator := range s.validators {
+ if validator == nil {
+ continue
+ }
+ if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok {
+ redeemableChildren.redeemChildren()
+ }
+ if redeemable, ok := validator.(interface{ redeem() }); ok {
+ redeemable.redeem()
+ }
+ s.validators[i] = nil // free up allocated children if not in pool
}
}
diff --git a/test/tools/vendor/github.com/go-openapi/validate/schema_option.go b/test/tools/vendor/github.com/go-openapi/validate/schema_option.go
index 4b4879de8b1..65eeebeaab3 100644
--- a/test/tools/vendor/github.com/go-openapi/validate/schema_option.go
+++ b/test/tools/vendor/github.com/go-openapi/validate/schema_option.go
@@ -18,6 +18,9 @@ package validate
type SchemaValidatorOptions struct {
EnableObjectArrayTypeCheck bool
EnableArrayMustHaveItemsCheck bool
+ recycleValidators bool
+ recycleResult bool
+ skipSchemataResult bool
}
// Option sets optional rules for schema validation
@@ -45,10 +48,36 @@ func SwaggerSchema(enable bool) Option {
}
}
-// Options returns current options
+// WithRecycleValidators saves memory allocations and makes validators
+// available for a single use of Validate() only.
+//
+// When a validator is recycled, called MUST not call the Validate() method twice.
+func WithRecycleValidators(enable bool) Option {
+ return func(svo *SchemaValidatorOptions) {
+ svo.recycleValidators = enable
+ }
+}
+
+func withRecycleResults(enable bool) Option {
+ return func(svo *SchemaValidatorOptions) {
+ svo.recycleResult = enable
+ }
+}
+
+// WithSkipSchemataResult skips the deep audit payload stored in validation Result
+func WithSkipSchemataResult(enable bool) Option {
+ return func(svo *SchemaValidatorOptions) {
+ svo.skipSchemataResult = enable
+ }
+}
+
+// Options returns the current set of options
func (svo SchemaValidatorOptions) Options() []Option {
return []Option{
EnableObjectArrayTypeCheck(svo.EnableObjectArrayTypeCheck),
EnableArrayMustHaveItemsCheck(svo.EnableArrayMustHaveItemsCheck),
+ WithRecycleValidators(svo.recycleValidators),
+ withRecycleResults(svo.recycleResult),
+ WithSkipSchemataResult(svo.skipSchemataResult),
}
}
diff --git a/test/tools/vendor/github.com/go-openapi/validate/schema_props.go b/test/tools/vendor/github.com/go-openapi/validate/schema_props.go
index 9bac3d29fb9..1ca379244dc 100644
--- a/test/tools/vendor/github.com/go-openapi/validate/schema_props.go
+++ b/test/tools/vendor/github.com/go-openapi/validate/schema_props.go
@@ -30,211 +30,327 @@ type schemaPropsValidator struct {
AnyOf []spec.Schema
Not *spec.Schema
Dependencies spec.Dependencies
- anyOfValidators []SchemaValidator
- allOfValidators []SchemaValidator
- oneOfValidators []SchemaValidator
+ anyOfValidators []*SchemaValidator
+ allOfValidators []*SchemaValidator
+ oneOfValidators []*SchemaValidator
notValidator *SchemaValidator
Root interface{}
KnownFormats strfmt.Registry
- Options SchemaValidatorOptions
+ Options *SchemaValidatorOptions
}
func (s *schemaPropsValidator) SetPath(path string) {
s.Path = path
}
-func newSchemaPropsValidator(path string, in string, allOf, oneOf, anyOf []spec.Schema, not *spec.Schema, deps spec.Dependencies, root interface{}, formats strfmt.Registry, options ...Option) *schemaPropsValidator {
- anyValidators := make([]SchemaValidator, 0, len(anyOf))
- for _, v := range anyOf {
- v := v
- anyValidators = append(anyValidators, *NewSchemaValidator(&v, root, path, formats, options...))
+func newSchemaPropsValidator(
+ path string, in string, allOf, oneOf, anyOf []spec.Schema, not *spec.Schema, deps spec.Dependencies, root interface{}, formats strfmt.Registry,
+ opts *SchemaValidatorOptions) *schemaPropsValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
}
- allValidators := make([]SchemaValidator, 0, len(allOf))
- for _, v := range allOf {
- v := v
- allValidators = append(allValidators, *NewSchemaValidator(&v, root, path, formats, options...))
+
+ anyValidators := make([]*SchemaValidator, 0, len(anyOf))
+ for i := range anyOf {
+ anyValidators = append(anyValidators, newSchemaValidator(&anyOf[i], root, path, formats, opts))
+ }
+ allValidators := make([]*SchemaValidator, 0, len(allOf))
+ for i := range allOf {
+ allValidators = append(allValidators, newSchemaValidator(&allOf[i], root, path, formats, opts))
}
- oneValidators := make([]SchemaValidator, 0, len(oneOf))
- for _, v := range oneOf {
- v := v
- oneValidators = append(oneValidators, *NewSchemaValidator(&v, root, path, formats, options...))
+ oneValidators := make([]*SchemaValidator, 0, len(oneOf))
+ for i := range oneOf {
+ oneValidators = append(oneValidators, newSchemaValidator(&oneOf[i], root, path, formats, opts))
}
var notValidator *SchemaValidator
if not != nil {
- notValidator = NewSchemaValidator(not, root, path, formats, options...)
- }
-
- schOptions := &SchemaValidatorOptions{}
- for _, o := range options {
- o(schOptions)
- }
- return &schemaPropsValidator{
- Path: path,
- In: in,
- AllOf: allOf,
- OneOf: oneOf,
- AnyOf: anyOf,
- Not: not,
- Dependencies: deps,
- anyOfValidators: anyValidators,
- allOfValidators: allValidators,
- oneOfValidators: oneValidators,
- notValidator: notValidator,
- Root: root,
- KnownFormats: formats,
- Options: *schOptions,
+ notValidator = newSchemaValidator(not, root, path, formats, opts)
+ }
+
+ var s *schemaPropsValidator
+ if opts.recycleValidators {
+ s = pools.poolOfSchemaPropsValidators.BorrowValidator()
+ } else {
+ s = new(schemaPropsValidator)
}
+
+ s.Path = path
+ s.In = in
+ s.AllOf = allOf
+ s.OneOf = oneOf
+ s.AnyOf = anyOf
+ s.Not = not
+ s.Dependencies = deps
+ s.anyOfValidators = anyValidators
+ s.allOfValidators = allValidators
+ s.oneOfValidators = oneValidators
+ s.notValidator = notValidator
+ s.Root = root
+ s.KnownFormats = formats
+ s.Options = opts
+
+ return s
}
-func (s *schemaPropsValidator) Applies(source interface{}, kind reflect.Kind) bool {
- r := reflect.TypeOf(source) == specSchemaType
- debugLog("schema props validator for %q applies %t for %T (kind: %v)\n", s.Path, r, source, kind)
- return r
+func (s *schemaPropsValidator) Applies(source interface{}, _ reflect.Kind) bool {
+ _, isSchema := source.(*spec.Schema)
+ return isSchema
}
func (s *schemaPropsValidator) Validate(data interface{}) *Result {
- mainResult := new(Result)
+ var mainResult *Result
+ if s.Options.recycleResult {
+ mainResult = pools.poolOfResults.BorrowResult()
+ } else {
+ mainResult = new(Result)
+ }
// Intermediary error results
// IMPORTANT! messages from underlying validators
- keepResultAnyOf := new(Result)
- keepResultOneOf := new(Result)
- keepResultAllOf := new(Result)
+ var keepResultAnyOf, keepResultOneOf, keepResultAllOf *Result
+
+ if s.Options.recycleValidators {
+ defer func() {
+ s.redeemChildren()
+ s.redeem()
+
+ // results are redeemed when merged
+ }()
+ }
- // Validates at least one in anyOf schemas
- var firstSuccess *Result
if len(s.anyOfValidators) > 0 {
- var bestFailures *Result
- succeededOnce := false
- for _, anyOfSchema := range s.anyOfValidators {
- result := anyOfSchema.Validate(data)
- // We keep inner IMPORTANT! errors no matter what MatchCount tells us
- keepResultAnyOf.Merge(result.keepRelevantErrors())
- if result.IsValid() {
- bestFailures = nil
- succeededOnce = true
- if firstSuccess == nil {
- firstSuccess = result
- }
- keepResultAnyOf = new(Result)
- break
- }
- // MatchCount is used to select errors from the schema with most positive checks
- if bestFailures == nil || result.MatchCount > bestFailures.MatchCount {
- bestFailures = result
+ keepResultAnyOf = pools.poolOfResults.BorrowResult()
+ s.validateAnyOf(data, mainResult, keepResultAnyOf)
+ }
+
+ if len(s.oneOfValidators) > 0 {
+ keepResultOneOf = pools.poolOfResults.BorrowResult()
+ s.validateOneOf(data, mainResult, keepResultOneOf)
+ }
+
+ if len(s.allOfValidators) > 0 {
+ keepResultAllOf = pools.poolOfResults.BorrowResult()
+ s.validateAllOf(data, mainResult, keepResultAllOf)
+ }
+
+ if s.notValidator != nil {
+ s.validateNot(data, mainResult)
+ }
+
+ if s.Dependencies != nil && len(s.Dependencies) > 0 && reflect.TypeOf(data).Kind() == reflect.Map {
+ s.validateDependencies(data, mainResult)
+ }
+
+ mainResult.Inc()
+
+ // In the end we retain best failures for schema validation
+ // plus, if any, composite errors which may explain special cases (tagged as IMPORTANT!).
+ return mainResult.Merge(keepResultAllOf, keepResultOneOf, keepResultAnyOf)
+}
+
+func (s *schemaPropsValidator) validateAnyOf(data interface{}, mainResult, keepResultAnyOf *Result) {
+ // Validates at least one in anyOf schemas
+ var bestFailures *Result
+
+ for i, anyOfSchema := range s.anyOfValidators {
+ result := anyOfSchema.Validate(data)
+ if s.Options.recycleValidators {
+ s.anyOfValidators[i] = nil
+ }
+ // We keep inner IMPORTANT! errors no matter what MatchCount tells us
+ keepResultAnyOf.Merge(result.keepRelevantErrors()) // merges (and redeems) a new instance of Result
+
+ if result.IsValid() {
+ if bestFailures != nil && bestFailures.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(bestFailures)
}
+
+ _ = keepResultAnyOf.cleared()
+ mainResult.Merge(result)
+
+ return
}
- if !succeededOnce {
- mainResult.AddErrors(mustValidateAtLeastOneSchemaMsg(s.Path))
+ // MatchCount is used to select errors from the schema with most positive checks
+ if bestFailures == nil || result.MatchCount > bestFailures.MatchCount {
+ if bestFailures != nil && bestFailures.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(bestFailures)
+ }
+ bestFailures = result
+
+ continue
}
- if bestFailures != nil {
- mainResult.Merge(bestFailures)
- } else if firstSuccess != nil {
- mainResult.Merge(firstSuccess)
+
+ if result.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(result) // this result is ditched
}
}
+ mainResult.AddErrors(mustValidateAtLeastOneSchemaMsg(s.Path))
+ mainResult.Merge(bestFailures)
+}
+
+func (s *schemaPropsValidator) validateOneOf(data interface{}, mainResult, keepResultOneOf *Result) {
// Validates exactly one in oneOf schemas
- if len(s.oneOfValidators) > 0 {
- var bestFailures *Result
- var firstSuccess *Result
- validated := 0
-
- for _, oneOfSchema := range s.oneOfValidators {
- result := oneOfSchema.Validate(data)
- // We keep inner IMPORTANT! errors no matter what MatchCount tells us
- keepResultOneOf.Merge(result.keepRelevantErrors())
- if result.IsValid() {
- validated++
- bestFailures = nil
- if firstSuccess == nil {
- firstSuccess = result
- }
- keepResultOneOf = new(Result)
- continue
- }
- // MatchCount is used to select errors from the schema with most positive checks
- if validated == 0 && (bestFailures == nil || result.MatchCount > bestFailures.MatchCount) {
- bestFailures = result
- }
+ var (
+ firstSuccess, bestFailures *Result
+ validated int
+ )
+
+ for i, oneOfSchema := range s.oneOfValidators {
+ result := oneOfSchema.Validate(data)
+ if s.Options.recycleValidators {
+ s.oneOfValidators[i] = nil
}
- if validated != 1 {
- var additionalMsg string
- if validated == 0 {
- additionalMsg = "Found none valid"
- } else {
- additionalMsg = fmt.Sprintf("Found %d valid alternatives", validated)
- }
+ // We keep inner IMPORTANT! errors no matter what MatchCount tells us
+ keepResultOneOf.Merge(result.keepRelevantErrors()) // merges (and redeems) a new instance of Result
- mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, additionalMsg))
- if bestFailures != nil {
- mainResult.Merge(bestFailures)
- }
- } else if firstSuccess != nil {
- mainResult.Merge(firstSuccess)
- }
- }
+ if result.IsValid() {
+ validated++
+ _ = keepResultOneOf.cleared()
- // Validates all of allOf schemas
- if len(s.allOfValidators) > 0 {
- validated := 0
-
- for _, allOfSchema := range s.allOfValidators {
- result := allOfSchema.Validate(data)
- // We keep inner IMPORTANT! errors no matter what MatchCount tells us
- keepResultAllOf.Merge(result.keepRelevantErrors())
- // keepResultAllOf.Merge(result)
- if result.IsValid() {
- validated++
+ if firstSuccess == nil {
+ firstSuccess = result
+ } else if result.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(result) // this result is ditched
}
- mainResult.Merge(result)
+
+ continue
}
- if validated != len(s.allOfValidators) {
- additionalMsg := ""
- if validated == 0 {
- additionalMsg = ". None validated"
+ // MatchCount is used to select errors from the schema with most positive checks
+ if validated == 0 && (bestFailures == nil || result.MatchCount > bestFailures.MatchCount) {
+ if bestFailures != nil && bestFailures.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(bestFailures)
}
+ bestFailures = result
+ } else if result.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(result) // this result is ditched
+ }
+ }
- mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, additionalMsg))
+ switch validated {
+ case 0:
+ mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, "Found none valid"))
+ mainResult.Merge(bestFailures)
+ // firstSucess necessarily nil
+ case 1:
+ mainResult.Merge(firstSuccess)
+ if bestFailures != nil && bestFailures.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(bestFailures)
+ }
+ default:
+ mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, fmt.Sprintf("Found %d valid alternatives", validated)))
+ mainResult.Merge(bestFailures)
+ if firstSuccess != nil && firstSuccess.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(firstSuccess)
}
}
+}
- if s.notValidator != nil {
- result := s.notValidator.Validate(data)
+func (s *schemaPropsValidator) validateAllOf(data interface{}, mainResult, keepResultAllOf *Result) {
+ // Validates all of allOf schemas
+ var validated int
+
+ for i, allOfSchema := range s.allOfValidators {
+ result := allOfSchema.Validate(data)
+ if s.Options.recycleValidators {
+ s.allOfValidators[i] = nil
+ }
// We keep inner IMPORTANT! errors no matter what MatchCount tells us
+ keepResultAllOf.Merge(result.keepRelevantErrors())
if result.IsValid() {
- mainResult.AddErrors(mustNotValidatechemaMsg(s.Path))
+ validated++
}
+ mainResult.Merge(result)
}
- if s.Dependencies != nil && len(s.Dependencies) > 0 && reflect.TypeOf(data).Kind() == reflect.Map {
- val := data.(map[string]interface{})
- for key := range val {
- if dep, ok := s.Dependencies[key]; ok {
+ switch validated {
+ case 0:
+ mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, ". None validated"))
+ case len(s.allOfValidators):
+ default:
+ mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, ""))
+ }
+}
- if dep.Schema != nil {
- mainResult.Merge(NewSchemaValidator(dep.Schema, s.Root, s.Path+"."+key, s.KnownFormats, s.Options.Options()...).Validate(data))
- continue
- }
+func (s *schemaPropsValidator) validateNot(data interface{}, mainResult *Result) {
+ result := s.notValidator.Validate(data)
+ if s.Options.recycleValidators {
+ s.notValidator = nil
+ }
+ // We keep inner IMPORTANT! errors no matter what MatchCount tells us
+ if result.IsValid() {
+ mainResult.AddErrors(mustNotValidatechemaMsg(s.Path))
+ }
+ if result.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(result) // this result is ditched
+ }
+}
+
+func (s *schemaPropsValidator) validateDependencies(data interface{}, mainResult *Result) {
+ val := data.(map[string]interface{})
+ for key := range val {
+ dep, ok := s.Dependencies[key]
+ if !ok {
+ continue
+ }
+
+ if dep.Schema != nil {
+ mainResult.Merge(
+ newSchemaValidator(dep.Schema, s.Root, s.Path+"."+key, s.KnownFormats, s.Options).Validate(data),
+ )
+ continue
+ }
- if len(dep.Property) > 0 {
- for _, depKey := range dep.Property {
- if _, ok := val[depKey]; !ok {
- mainResult.AddErrors(hasADependencyMsg(s.Path, depKey))
- }
- }
+ if len(dep.Property) > 0 {
+ for _, depKey := range dep.Property {
+ if _, ok := val[depKey]; !ok {
+ mainResult.AddErrors(hasADependencyMsg(s.Path, depKey))
}
}
}
}
+}
- mainResult.Inc()
- // In the end we retain best failures for schema validation
- // plus, if any, composite errors which may explain special cases (tagged as IMPORTANT!).
- return mainResult.Merge(keepResultAllOf, keepResultOneOf, keepResultAnyOf)
+func (s *schemaPropsValidator) redeem() {
+ pools.poolOfSchemaPropsValidators.RedeemValidator(s)
+}
+
+func (s *schemaPropsValidator) redeemChildren() {
+ for _, v := range s.anyOfValidators {
+ if v == nil {
+ continue
+ }
+ v.redeemChildren()
+ v.redeem()
+ }
+ s.anyOfValidators = nil
+
+ for _, v := range s.allOfValidators {
+ if v == nil {
+ continue
+ }
+ v.redeemChildren()
+ v.redeem()
+ }
+ s.allOfValidators = nil
+
+ for _, v := range s.oneOfValidators {
+ if v == nil {
+ continue
+ }
+ v.redeemChildren()
+ v.redeem()
+ }
+ s.oneOfValidators = nil
+
+ if s.notValidator != nil {
+ s.notValidator.redeemChildren()
+ s.notValidator.redeem()
+ s.notValidator = nil
+ }
}
diff --git a/test/tools/vendor/github.com/go-openapi/validate/slice_validator.go b/test/tools/vendor/github.com/go-openapi/validate/slice_validator.go
index aa429f5184e..13bb02087d9 100644
--- a/test/tools/vendor/github.com/go-openapi/validate/slice_validator.go
+++ b/test/tools/vendor/github.com/go-openapi/validate/slice_validator.go
@@ -32,7 +32,36 @@ type schemaSliceValidator struct {
Items *spec.SchemaOrArray
Root interface{}
KnownFormats strfmt.Registry
- Options SchemaValidatorOptions
+ Options *SchemaValidatorOptions
+}
+
+func newSliceValidator(path, in string,
+ maxItems, minItems *int64, uniqueItems bool,
+ additionalItems *spec.SchemaOrBool, items *spec.SchemaOrArray,
+ root interface{}, formats strfmt.Registry, opts *SchemaValidatorOptions) *schemaSliceValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var v *schemaSliceValidator
+ if opts.recycleValidators {
+ v = pools.poolOfSliceValidators.BorrowValidator()
+ } else {
+ v = new(schemaSliceValidator)
+ }
+
+ v.Path = path
+ v.In = in
+ v.MaxItems = maxItems
+ v.MinItems = minItems
+ v.UniqueItems = uniqueItems
+ v.AdditionalItems = additionalItems
+ v.Items = items
+ v.Root = root
+ v.KnownFormats = formats
+ v.Options = opts
+
+ return v
}
func (s *schemaSliceValidator) SetPath(path string) {
@@ -46,7 +75,18 @@ func (s *schemaSliceValidator) Applies(source interface{}, kind reflect.Kind) bo
}
func (s *schemaSliceValidator) Validate(data interface{}) *Result {
- result := new(Result)
+ if s.Options.recycleValidators {
+ defer func() {
+ s.redeem()
+ }()
+ }
+
+ var result *Result
+ if s.Options.recycleResult {
+ result = pools.poolOfResults.BorrowResult()
+ } else {
+ result = new(Result)
+ }
if data == nil {
return result
}
@@ -54,8 +94,8 @@ func (s *schemaSliceValidator) Validate(data interface{}) *Result {
size := val.Len()
if s.Items != nil && s.Items.Schema != nil {
- validator := NewSchemaValidator(s.Items.Schema, s.Root, s.Path, s.KnownFormats, s.Options.Options()...)
for i := 0; i < size; i++ {
+ validator := newSchemaValidator(s.Items.Schema, s.Root, s.Path, s.KnownFormats, s.Options)
validator.SetPath(fmt.Sprintf("%s.%d", s.Path, i))
value := val.Index(i)
result.mergeForSlice(val, i, validator.Validate(value.Interface()))
@@ -66,10 +106,11 @@ func (s *schemaSliceValidator) Validate(data interface{}) *Result {
if s.Items != nil && len(s.Items.Schemas) > 0 {
itemsSize = len(s.Items.Schemas)
for i := 0; i < itemsSize; i++ {
- validator := NewSchemaValidator(&s.Items.Schemas[i], s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options.Options()...)
- if val.Len() <= i {
+ if size <= i {
break
}
+
+ validator := newSchemaValidator(&s.Items.Schemas[i], s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options)
result.mergeForSlice(val, i, validator.Validate(val.Index(i).Interface()))
}
}
@@ -79,7 +120,7 @@ func (s *schemaSliceValidator) Validate(data interface{}) *Result {
}
if s.AdditionalItems.Schema != nil {
for i := itemsSize; i < size-itemsSize+1; i++ {
- validator := NewSchemaValidator(s.AdditionalItems.Schema, s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options.Options()...)
+ validator := newSchemaValidator(s.AdditionalItems.Schema, s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options)
result.mergeForSlice(val, i, validator.Validate(val.Index(i).Interface()))
}
}
@@ -103,3 +144,7 @@ func (s *schemaSliceValidator) Validate(data interface{}) *Result {
result.Inc()
return result
}
+
+func (s *schemaSliceValidator) redeem() {
+ pools.poolOfSliceValidators.RedeemValidator(s)
+}
diff --git a/test/tools/vendor/github.com/go-openapi/validate/spec.go b/test/tools/vendor/github.com/go-openapi/validate/spec.go
index dff01f00be7..965452566e1 100644
--- a/test/tools/vendor/github.com/go-openapi/validate/spec.go
+++ b/test/tools/vendor/github.com/go-openapi/validate/spec.go
@@ -15,6 +15,8 @@
package validate
import (
+ "bytes"
+ "encoding/gob"
"encoding/json"
"fmt"
"sort"
@@ -26,23 +28,23 @@ import (
"github.com/go-openapi/loads"
"github.com/go-openapi/spec"
"github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
)
// Spec validates an OpenAPI 2.0 specification document.
//
// Returns an error flattening in a single standard error, all validation messages.
//
-// - TODO: $ref should not have siblings
-// - TODO: make sure documentation reflects all checks and warnings
-// - TODO: check on discriminators
-// - TODO: explicit message on unsupported keywords (better than "forbidden property"...)
-// - TODO: full list of unresolved refs
-// - TODO: validate numeric constraints (issue#581): this should be handled like defaults and examples
-// - TODO: option to determine if we validate for go-swagger or in a more general context
-// - TODO: check on required properties to support anyOf, allOf, oneOf
+// - TODO: $ref should not have siblings
+// - TODO: make sure documentation reflects all checks and warnings
+// - TODO: check on discriminators
+// - TODO: explicit message on unsupported keywords (better than "forbidden property"...)
+// - TODO: full list of unresolved refs
+// - TODO: validate numeric constraints (issue#581): this should be handled like defaults and examples
+// - TODO: option to determine if we validate for go-swagger or in a more general context
+// - TODO: check on required properties to support anyOf, allOf, oneOf
//
// NOTE: SecurityScopes are maps: no need to check uniqueness
-//
func Spec(doc *loads.Document, formats strfmt.Registry) error {
errs, _ /*warns*/ := NewSpecValidator(doc.Schema(), formats).Validate(doc)
if errs.HasErrors() {
@@ -53,25 +55,38 @@ func Spec(doc *loads.Document, formats strfmt.Registry) error {
// SpecValidator validates a swagger 2.0 spec
type SpecValidator struct {
- schema *spec.Schema // swagger 2.0 schema
- spec *loads.Document
- analyzer *analysis.Spec
- expanded *loads.Document
- KnownFormats strfmt.Registry
- Options Opts // validation options
+ schema *spec.Schema // swagger 2.0 schema
+ spec *loads.Document
+ analyzer *analysis.Spec
+ expanded *loads.Document
+ KnownFormats strfmt.Registry
+ Options Opts // validation options
+ schemaOptions *SchemaValidatorOptions
}
// NewSpecValidator creates a new swagger spec validator instance
func NewSpecValidator(schema *spec.Schema, formats strfmt.Registry) *SpecValidator {
+ // schema options that apply to all called validators
+ schemaOptions := new(SchemaValidatorOptions)
+ for _, o := range []Option{
+ SwaggerSchema(true),
+ WithRecycleValidators(true),
+ // withRecycleResults(true),
+ } {
+ o(schemaOptions)
+ }
+
return &SpecValidator{
- schema: schema,
- KnownFormats: formats,
- Options: defaultOpts,
+ schema: schema,
+ KnownFormats: formats,
+ Options: defaultOpts,
+ schemaOptions: schemaOptions,
}
}
// Validate validates the swagger spec
func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) {
+ s.schemaOptions.skipSchemataResult = s.Options.SkipSchemataResult
var sd *loads.Document
errs, warnings := new(Result), new(Result)
@@ -85,11 +100,8 @@ func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) {
s.spec = sd
s.analyzer = analysis.New(sd.Spec())
- // Swagger schema validator
- schv := NewSchemaValidator(s.schema, nil, "", s.KnownFormats, SwaggerSchema(true))
- var obj interface{}
-
// Raw spec unmarshalling errors
+ var obj interface{}
if err := json.Unmarshal(sd.Raw(), &obj); err != nil {
// NOTE: under normal conditions, the *load.Document has been already unmarshalled
// So this one is just a paranoid check on the behavior of the spec package
@@ -103,6 +115,8 @@ func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) {
warnings.AddErrors(errs.Warnings...)
}()
+ // Swagger schema validator
+ schv := newSchemaValidator(s.schema, nil, "", s.KnownFormats, s.schemaOptions)
errs.Merge(schv.Validate(obj)) // error -
// There may be a point in continuing to try and determine more accurate errors
if !s.Options.ContinueOnErrors && errs.HasErrors() {
@@ -130,13 +144,13 @@ func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) {
}
// Values provided as default MUST validate their schema
- df := &defaultValidator{SpecValidator: s}
+ df := &defaultValidator{SpecValidator: s, schemaOptions: s.schemaOptions}
errs.Merge(df.Validate())
// Values provided as examples MUST validate their schema
// Value provided as examples in a response without schema generate a warning
// Known limitations: examples in responses for mime type not application/json are ignored (warning)
- ex := &exampleValidator{SpecValidator: s}
+ ex := &exampleValidator{SpecValidator: s, schemaOptions: s.schemaOptions}
errs.Merge(ex.Validate())
errs.Merge(s.validateNonEmptyPathParamNames())
@@ -148,22 +162,27 @@ func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) {
}
func (s *SpecValidator) validateNonEmptyPathParamNames() *Result {
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
if s.spec.Spec().Paths == nil {
// There is no Paths object: error
res.AddErrors(noValidPathMsg())
- } else {
- if s.spec.Spec().Paths.Paths == nil {
- // Paths may be empty: warning
- res.AddWarnings(noValidPathMsg())
- } else {
- for k := range s.spec.Spec().Paths.Paths {
- if strings.Contains(k, "{}") {
- res.AddErrors(emptyPathParameterMsg(k))
- }
- }
+
+ return res
+ }
+
+ if s.spec.Spec().Paths.Paths == nil {
+ // Paths may be empty: warning
+ res.AddWarnings(noValidPathMsg())
+
+ return res
+ }
+
+ for k := range s.spec.Spec().Paths.Paths {
+ if strings.Contains(k, "{}") {
+ res.AddErrors(emptyPathParameterMsg(k))
}
}
+
return res
}
@@ -177,7 +196,7 @@ func (s *SpecValidator) validateDuplicateOperationIDs() *Result {
// fallback on possible incomplete picture because of previous errors
analyzer = s.analyzer
}
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
known := make(map[string]int)
for _, v := range analyzer.OperationIDs() {
if v != "" {
@@ -199,7 +218,7 @@ type dupProp struct {
func (s *SpecValidator) validateDuplicatePropertyNames() *Result {
// definition can't declare a property that's already defined by one of its ancestors
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
for k, sch := range s.spec.Spec().Definitions {
if len(sch.AllOf) == 0 {
continue
@@ -248,7 +267,7 @@ func (s *SpecValidator) validateSchemaPropertyNames(nm string, sch spec.Schema,
schn := nm
schc := &sch
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
for schc.Ref.String() != "" {
// gather property names
@@ -285,7 +304,7 @@ func (s *SpecValidator) validateSchemaPropertyNames(nm string, sch spec.Schema,
}
func (s *SpecValidator) validateCircularAncestry(nm string, sch spec.Schema, knowns map[string]struct{}) ([]string, *Result) {
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
if sch.Ref.String() == "" && len(sch.AllOf) == 0 { // Safeguard. We should not be able to actually get there
return nil, res
@@ -335,7 +354,7 @@ func (s *SpecValidator) validateCircularAncestry(nm string, sch spec.Schema, kno
func (s *SpecValidator) validateItems() *Result {
// validate parameter, items, schema and response objects for presence of item if type is array
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
for method, pi := range s.analyzer.Operations() {
for path, op := range pi {
@@ -394,7 +413,7 @@ func (s *SpecValidator) validateItems() *Result {
// Verifies constraints on array type
func (s *SpecValidator) validateSchemaItems(schema spec.Schema, prefix, opID string) *Result {
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
if !schema.Type.Contains(arrayType) {
return res
}
@@ -418,7 +437,7 @@ func (s *SpecValidator) validateSchemaItems(schema spec.Schema, prefix, opID str
func (s *SpecValidator) validatePathParamPresence(path string, fromPath, fromOperation []string) *Result {
// Each defined operation path parameters must correspond to a named element in the API's path pattern.
// (For example, you cannot have a path parameter named id for the following path /pets/{petId} but you must have a path parameter named petId.)
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
for _, l := range fromPath {
var matched bool
for _, r := range fromOperation {
@@ -456,7 +475,6 @@ func (s *SpecValidator) validateReferenced() *Result {
return &res
}
-// nolint: dupl
func (s *SpecValidator) validateReferencedParameters() *Result {
// Each referenceable definition should have references.
params := s.spec.Spec().Parameters
@@ -475,14 +493,13 @@ func (s *SpecValidator) validateReferencedParameters() *Result {
if len(expected) == 0 {
return nil
}
- result := new(Result)
+ result := pools.poolOfResults.BorrowResult()
for k := range expected {
result.AddWarnings(unusedParamMsg(k))
}
return result
}
-// nolint: dupl
func (s *SpecValidator) validateReferencedResponses() *Result {
// Each referenceable definition should have references.
responses := s.spec.Spec().Responses
@@ -501,14 +518,13 @@ func (s *SpecValidator) validateReferencedResponses() *Result {
if len(expected) == 0 {
return nil
}
- result := new(Result)
+ result := pools.poolOfResults.BorrowResult()
for k := range expected {
result.AddWarnings(unusedResponseMsg(k))
}
return result
}
-// nolint: dupl
func (s *SpecValidator) validateReferencedDefinitions() *Result {
// Each referenceable definition must have references.
defs := s.spec.Spec().Definitions
@@ -537,7 +553,7 @@ func (s *SpecValidator) validateReferencedDefinitions() *Result {
func (s *SpecValidator) validateRequiredDefinitions() *Result {
// Each property listed in the required array must be defined in the properties of the model
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
DEFINITIONS:
for d, schema := range s.spec.Spec().Definitions {
@@ -556,7 +572,7 @@ DEFINITIONS:
func (s *SpecValidator) validateRequiredProperties(path, in string, v *spec.Schema) *Result {
// Takes care of recursive property definitions, which may be nested in additionalProperties schemas
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
propertyMatch := false
patternMatch := false
additionalPropertiesMatch := false
@@ -615,40 +631,42 @@ func (s *SpecValidator) validateRequiredProperties(path, in string, v *spec.Sche
func (s *SpecValidator) validateParameters() *Result {
// - for each method, path is unique, regardless of path parameters
// e.g. GET:/petstore/{id}, GET:/petstore/{pet}, GET:/petstore are
- // considered duplicate paths
+ // considered duplicate paths, if StrictPathParamUniqueness is enabled.
// - each parameter should have a unique `name` and `type` combination
// - each operation should have only 1 parameter of type body
// - there must be at most 1 parameter in body
// - parameters with pattern property must specify valid patterns
// - $ref in parameters must resolve
// - path param must be required
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
rexGarbledPathSegment := mustCompileRegexp(`.*[{}\s]+.*`)
for method, pi := range s.expandedAnalyzer().Operations() {
methodPaths := make(map[string]map[string]string)
for path, op := range pi {
- pathToAdd := pathHelp.stripParametersInPath(path)
+ if s.Options.StrictPathParamUniqueness {
+ pathToAdd := pathHelp.stripParametersInPath(path)
- // Warn on garbled path afer param stripping
- if rexGarbledPathSegment.MatchString(pathToAdd) {
- res.AddWarnings(pathStrippedParamGarbledMsg(pathToAdd))
- }
+ // Warn on garbled path afer param stripping
+ if rexGarbledPathSegment.MatchString(pathToAdd) {
+ res.AddWarnings(pathStrippedParamGarbledMsg(pathToAdd))
+ }
- // Check uniqueness of stripped paths
- if _, found := methodPaths[method][pathToAdd]; found {
+ // Check uniqueness of stripped paths
+ if _, found := methodPaths[method][pathToAdd]; found {
- // Sort names for stable, testable output
- if strings.Compare(path, methodPaths[method][pathToAdd]) < 0 {
- res.AddErrors(pathOverlapMsg(path, methodPaths[method][pathToAdd]))
+ // Sort names for stable, testable output
+ if strings.Compare(path, methodPaths[method][pathToAdd]) < 0 {
+ res.AddErrors(pathOverlapMsg(path, methodPaths[method][pathToAdd]))
+ } else {
+ res.AddErrors(pathOverlapMsg(methodPaths[method][pathToAdd], path))
+ }
} else {
- res.AddErrors(pathOverlapMsg(methodPaths[method][pathToAdd], path))
- }
- } else {
- if _, found := methodPaths[method]; !found {
- methodPaths[method] = map[string]string{}
- }
- methodPaths[method][pathToAdd] = path // Original non stripped path
+ if _, found := methodPaths[method]; !found {
+ methodPaths[method] = map[string]string{}
+ }
+ methodPaths[method][pathToAdd] = path // Original non stripped path
+ }
}
var bodyParams []string
@@ -659,7 +677,23 @@ func (s *SpecValidator) validateParameters() *Result {
// TODO: should be done after param expansion
res.Merge(s.checkUniqueParams(path, method, op))
+ // pick the root schema from the swagger specification which describes a parameter
+ origSchema, ok := s.schema.Definitions["parameter"]
+ if !ok {
+ panic("unexpected swagger schema: missing #/definitions/parameter")
+ }
+ // clone it once to avoid expanding a global schema (e.g. swagger spec)
+ paramSchema, err := deepCloneSchema(origSchema)
+ if err != nil {
+ panic(fmt.Errorf("can't clone schema: %v", err))
+ }
+
for _, pr := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) {
+ // An expanded parameter must validate the Parameter schema (an unexpanded $ref always passes high-level schema validation)
+ schv := newSchemaValidator(¶mSchema, s.schema, fmt.Sprintf("%s.%s.parameters.%s", path, method, pr.Name), s.KnownFormats, s.schemaOptions)
+ obj := swag.ToDynamicJSON(pr)
+ res.Merge(schv.Validate(obj))
+
// Validate pattern regexp for parameters with a Pattern property
if _, err := compileRegexp(pr.Pattern); err != nil {
res.AddErrors(invalidPatternInParamMsg(op.ID, pr.Name, pr.Pattern))
@@ -741,7 +775,7 @@ func (s *SpecValidator) validateParameters() *Result {
func (s *SpecValidator) validateReferencesValid() *Result {
// each reference must point to a valid object
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
for _, r := range s.analyzer.AllRefs() {
if !r.IsValidURI(s.spec.SpecFilePath()) { // Safeguard - spec should always yield a valid URI
res.AddErrors(invalidRefMsg(r.String()))
@@ -767,7 +801,7 @@ func (s *SpecValidator) checkUniqueParams(path, method string, op *spec.Operatio
// However, there are some issues with such a factorization:
// - analysis does not seem to fully expand params
// - param keys may be altered by x-go-name
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
pnames := make(map[string]struct{})
if op.Parameters != nil { // Safeguard
@@ -802,3 +836,17 @@ func (s *SpecValidator) expandedAnalyzer() *analysis.Spec {
}
return s.analyzer
}
+
+func deepCloneSchema(src spec.Schema) (spec.Schema, error) {
+ var b bytes.Buffer
+ if err := gob.NewEncoder(&b).Encode(src); err != nil {
+ return spec.Schema{}, err
+ }
+
+ var dst spec.Schema
+ if err := gob.NewDecoder(&b).Decode(&dst); err != nil {
+ return spec.Schema{}, err
+ }
+
+ return dst, nil
+}
diff --git a/test/tools/vendor/github.com/go-openapi/validate/spec_messages.go b/test/tools/vendor/github.com/go-openapi/validate/spec_messages.go
index b3757adddbd..6d1f0f819cb 100644
--- a/test/tools/vendor/github.com/go-openapi/validate/spec_messages.go
+++ b/test/tools/vendor/github.com/go-openapi/validate/spec_messages.go
@@ -187,6 +187,8 @@ const (
// UnusedResponseWarning ...
UnusedResponseWarning = "response %q is not used anywhere"
+
+ InvalidObject = "expected an object in %q.%s"
)
// Additional error codes
@@ -347,11 +349,15 @@ func invalidParameterDefinitionAsSchemaMsg(path, method, operationID string) err
func parameterValidationTypeMismatchMsg(param, path, typ string) errors.Error {
return errors.New(errors.CompositeErrorCode, ParamValidationTypeMismatch, param, path, typ)
}
+func invalidObjectMsg(path, in string) errors.Error {
+ return errors.New(errors.CompositeErrorCode, InvalidObject, path, in)
+}
// disabled
-// func invalidResponseDefinitionAsSchemaMsg(path, method string) errors.Error {
-// return errors.New(errors.CompositeErrorCode, InvalidResponseDefinitionAsSchemaError, path, method)
-// }
+//
+// func invalidResponseDefinitionAsSchemaMsg(path, method string) errors.Error {
+// return errors.New(errors.CompositeErrorCode, InvalidResponseDefinitionAsSchemaError, path, method)
+// }
func someParametersBrokenMsg(path, method, operationID string) errors.Error {
return errors.New(errors.CompositeErrorCode, SomeParametersBrokenError, path, method, operationID)
}
diff --git a/test/tools/vendor/github.com/go-openapi/validate/type.go b/test/tools/vendor/github.com/go-openapi/validate/type.go
index 876467588f5..f87abb3d560 100644
--- a/test/tools/vendor/github.com/go-openapi/validate/type.go
+++ b/test/tools/vendor/github.com/go-openapi/validate/type.go
@@ -25,11 +25,34 @@ import (
)
type typeValidator struct {
+ Path string
+ In string
Type spec.StringOrArray
Nullable bool
Format string
- In string
- Path string
+ Options *SchemaValidatorOptions
+}
+
+func newTypeValidator(path, in string, typ spec.StringOrArray, nullable bool, format string, opts *SchemaValidatorOptions) *typeValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var t *typeValidator
+ if opts.recycleValidators {
+ t = pools.poolOfTypeValidators.BorrowValidator()
+ } else {
+ t = new(typeValidator)
+ }
+
+ t.Path = path
+ t.In = in
+ t.Type = typ
+ t.Nullable = nullable
+ t.Format = format
+ t.Options = opts
+
+ return t
}
func (t *typeValidator) schemaInfoForType(data interface{}) (string, string) {
@@ -90,7 +113,7 @@ func (t *typeValidator) schemaInfoForType(data interface{}) (string, string) {
default:
val := reflect.ValueOf(data)
tpe := val.Type()
- switch tpe.Kind() {
+ switch tpe.Kind() { //nolint:exhaustive
case reflect.Bool:
return booleanType, ""
case reflect.String:
@@ -125,23 +148,33 @@ func (t *typeValidator) SetPath(path string) {
t.Path = path
}
-func (t *typeValidator) Applies(source interface{}, kind reflect.Kind) bool {
+func (t *typeValidator) Applies(source interface{}, _ reflect.Kind) bool {
// typeValidator applies to Schema, Parameter and Header objects
- stpe := reflect.TypeOf(source)
- r := (len(t.Type) > 0 || t.Format != "") && (stpe == specSchemaType || stpe == specParameterType || stpe == specHeaderType)
- debugLog("type validator for %q applies %t for %T (kind: %v)\n", t.Path, r, source, kind)
- return r
+ switch source.(type) {
+ case *spec.Schema:
+ case *spec.Parameter:
+ case *spec.Header:
+ default:
+ return false
+ }
+
+ return (len(t.Type) > 0 || t.Format != "")
}
func (t *typeValidator) Validate(data interface{}) *Result {
- result := new(Result)
- result.Inc()
+ if t.Options.recycleValidators {
+ defer func() {
+ t.redeem()
+ }()
+ }
+
if data == nil {
// nil or zero value for the passed structure require Type: null
if len(t.Type) > 0 && !t.Type.Contains(nullType) && !t.Nullable { // TODO: if a property is not required it also passes this
- return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), nullType))
+ return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), nullType), t.Options.recycleResult)
}
- return result
+
+ return emptyResult
}
// check if the type matches, should be used in every validator chain as first item
@@ -151,8 +184,6 @@ func (t *typeValidator) Validate(data interface{}) *Result {
// infer schema type (JSON) and format from passed data type
schType, format := t.schemaInfoForType(data)
- debugLog("path: %s, schType: %s, format: %s, expType: %s, expFmt: %s, kind: %s", t.Path, schType, format, t.Type, t.Format, val.Kind().String())
-
// check numerical types
// TODO: check unsigned ints
// TODO: check json.Number (see schema.go)
@@ -163,15 +194,20 @@ func (t *typeValidator) Validate(data interface{}) *Result {
if kind != reflect.String && kind != reflect.Slice && t.Format != "" && !(t.Type.Contains(schType) || format == t.Format || isFloatInt || isIntFloat || isLowerInt || isLowerFloat) {
// TODO: test case
- return errorHelp.sErr(errors.InvalidType(t.Path, t.In, t.Format, format))
+ return errorHelp.sErr(errors.InvalidType(t.Path, t.In, t.Format, format), t.Options.recycleResult)
}
if !(t.Type.Contains(numberType) || t.Type.Contains(integerType)) && t.Format != "" && (kind == reflect.String || kind == reflect.Slice) {
- return result
+ return emptyResult
}
if !(t.Type.Contains(schType) || isFloatInt || isIntFloat) {
- return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), schType))
+ return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), schType), t.Options.recycleResult)
}
- return result
+
+ return emptyResult
+}
+
+func (t *typeValidator) redeem() {
+ pools.poolOfTypeValidators.RedeemValidator(t)
}
diff --git a/test/tools/vendor/github.com/go-openapi/validate/validator.go b/test/tools/vendor/github.com/go-openapi/validate/validator.go
index 38cdb9bb6cc..c083aecc9da 100644
--- a/test/tools/vendor/github.com/go-openapi/validate/validator.go
+++ b/test/tools/vendor/github.com/go-openapi/validate/validator.go
@@ -39,20 +39,31 @@ type itemsValidator struct {
root interface{}
path string
in string
- validators []valueValidator
+ validators [6]valueValidator
KnownFormats strfmt.Registry
+ Options *SchemaValidatorOptions
}
-func newItemsValidator(path, in string, items *spec.Items, root interface{}, formats strfmt.Registry) *itemsValidator {
- iv := &itemsValidator{path: path, in: in, items: items, root: root, KnownFormats: formats}
- iv.validators = []valueValidator{
- &typeValidator{
- Type: spec.StringOrArray([]string{items.Type}),
- Nullable: items.Nullable,
- Format: items.Format,
- In: in,
- Path: path,
- },
+func newItemsValidator(path, in string, items *spec.Items, root interface{}, formats strfmt.Registry, opts *SchemaValidatorOptions) *itemsValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var iv *itemsValidator
+ if opts.recycleValidators {
+ iv = pools.poolOfItemsValidators.BorrowValidator()
+ } else {
+ iv = new(itemsValidator)
+ }
+
+ iv.path = path
+ iv.in = in
+ iv.items = items
+ iv.root = root
+ iv.KnownFormats = formats
+ iv.Options = opts
+ iv.validators = [6]valueValidator{
+ iv.typeValidator(),
iv.stringValidator(),
iv.formatValidator(),
iv.numberValidator(),
@@ -63,77 +74,152 @@ func newItemsValidator(path, in string, items *spec.Items, root interface{}, for
}
func (i *itemsValidator) Validate(index int, data interface{}) *Result {
+ if i.Options.recycleValidators {
+ defer func() {
+ i.redeemChildren()
+ i.redeem()
+ }()
+ }
+
tpe := reflect.TypeOf(data)
kind := tpe.Kind()
- mainResult := new(Result)
+ var result *Result
+ if i.Options.recycleResult {
+ result = pools.poolOfResults.BorrowResult()
+ } else {
+ result = new(Result)
+ }
+
path := fmt.Sprintf("%s.%d", i.path, index)
- for _, validator := range i.validators {
+ for idx, validator := range i.validators {
+ if !validator.Applies(i.root, kind) {
+ if i.Options.recycleValidators {
+ // Validate won't be called, so relinquish this validator
+ if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok {
+ redeemableChildren.redeemChildren()
+ }
+ if redeemable, ok := validator.(interface{ redeem() }); ok {
+ redeemable.redeem()
+ }
+ i.validators[idx] = nil // prevents further (unsafe) usage
+ }
+
+ continue
+ }
+
validator.SetPath(path)
- if validator.Applies(i.root, kind) {
- result := validator.Validate(data)
- mainResult.Merge(result)
- mainResult.Inc()
- if result != nil && result.HasErrors() {
- return mainResult
+ err := validator.Validate(data)
+ if i.Options.recycleValidators {
+ i.validators[idx] = nil // prevents further (unsafe) usage
+ }
+ if err != nil {
+ result.Inc()
+ if err.HasErrors() {
+ result.Merge(err)
+
+ break
}
+
+ result.Merge(err)
}
}
- return mainResult
+
+ return result
+}
+
+func (i *itemsValidator) typeValidator() valueValidator {
+ return newTypeValidator(
+ i.path,
+ i.in,
+ spec.StringOrArray([]string{i.items.Type}),
+ i.items.Nullable,
+ i.items.Format,
+ i.Options,
+ )
}
func (i *itemsValidator) commonValidator() valueValidator {
- return &basicCommonValidator{
- In: i.in,
- Default: i.items.Default,
- Enum: i.items.Enum,
- }
+ return newBasicCommonValidator(
+ "",
+ i.in,
+ i.items.Default,
+ i.items.Enum,
+ i.Options,
+ )
}
func (i *itemsValidator) sliceValidator() valueValidator {
- return &basicSliceValidator{
- In: i.in,
- Default: i.items.Default,
- MaxItems: i.items.MaxItems,
- MinItems: i.items.MinItems,
- UniqueItems: i.items.UniqueItems,
- Source: i.root,
- Items: i.items.Items,
- KnownFormats: i.KnownFormats,
- }
+ return newBasicSliceValidator(
+ "",
+ i.in,
+ i.items.Default,
+ i.items.MaxItems,
+ i.items.MinItems,
+ i.items.UniqueItems,
+ i.items.Items,
+ i.root,
+ i.KnownFormats,
+ i.Options,
+ )
}
func (i *itemsValidator) numberValidator() valueValidator {
- return &numberValidator{
- In: i.in,
- Default: i.items.Default,
- MultipleOf: i.items.MultipleOf,
- Maximum: i.items.Maximum,
- ExclusiveMaximum: i.items.ExclusiveMaximum,
- Minimum: i.items.Minimum,
- ExclusiveMinimum: i.items.ExclusiveMinimum,
- Type: i.items.Type,
- Format: i.items.Format,
- }
+ return newNumberValidator(
+ "",
+ i.in,
+ i.items.Default,
+ i.items.MultipleOf,
+ i.items.Maximum,
+ i.items.ExclusiveMaximum,
+ i.items.Minimum,
+ i.items.ExclusiveMinimum,
+ i.items.Type,
+ i.items.Format,
+ i.Options,
+ )
}
func (i *itemsValidator) stringValidator() valueValidator {
- return &stringValidator{
- In: i.in,
- Default: i.items.Default,
- MaxLength: i.items.MaxLength,
- MinLength: i.items.MinLength,
- Pattern: i.items.Pattern,
- AllowEmptyValue: false,
- }
+ return newStringValidator(
+ "",
+ i.in,
+ i.items.Default,
+ false, // Required
+ false, // AllowEmpty
+ i.items.MaxLength,
+ i.items.MinLength,
+ i.items.Pattern,
+ i.Options,
+ )
}
func (i *itemsValidator) formatValidator() valueValidator {
- return &formatValidator{
- In: i.in,
- //Default: i.items.Default,
- Format: i.items.Format,
- KnownFormats: i.KnownFormats,
+ return newFormatValidator(
+ "",
+ i.in,
+ i.items.Format,
+ i.KnownFormats,
+ i.Options,
+ )
+}
+
+func (i *itemsValidator) redeem() {
+ pools.poolOfItemsValidators.RedeemValidator(i)
+}
+
+func (i *itemsValidator) redeemChildren() {
+ for idx, validator := range i.validators {
+ if validator == nil {
+ continue
+ }
+ if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok {
+ redeemableChildren.redeemChildren()
+ }
+ if redeemable, ok := validator.(interface{ redeem() }); ok {
+ redeemable.redeem()
+ }
+ i.validators[idx] = nil // free up allocated children if not in pool
}
}
@@ -142,265 +228,501 @@ type basicCommonValidator struct {
In string
Default interface{}
Enum []interface{}
+ Options *SchemaValidatorOptions
+}
+
+func newBasicCommonValidator(path, in string, def interface{}, enum []interface{}, opts *SchemaValidatorOptions) *basicCommonValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var b *basicCommonValidator
+ if opts.recycleValidators {
+ b = pools.poolOfBasicCommonValidators.BorrowValidator()
+ } else {
+ b = new(basicCommonValidator)
+ }
+
+ b.Path = path
+ b.In = in
+ b.Default = def
+ b.Enum = enum
+ b.Options = opts
+
+ return b
}
func (b *basicCommonValidator) SetPath(path string) {
b.Path = path
}
-func (b *basicCommonValidator) Applies(source interface{}, kind reflect.Kind) bool {
+func (b *basicCommonValidator) Applies(source interface{}, _ reflect.Kind) bool {
switch source.(type) {
case *spec.Parameter, *spec.Schema, *spec.Header:
return true
+ default:
+ return false
}
- return false
}
func (b *basicCommonValidator) Validate(data interface{}) (res *Result) {
- if len(b.Enum) > 0 {
- for _, enumValue := range b.Enum {
- actualType := reflect.TypeOf(enumValue)
- if actualType != nil { // Safeguard
- expectedValue := reflect.ValueOf(data)
- if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {
- if reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), enumValue) {
- return nil
- }
- }
- }
+ if b.Options.recycleValidators {
+ defer func() {
+ b.redeem()
+ }()
+ }
+
+ if len(b.Enum) == 0 {
+ return nil
+ }
+
+ for _, enumValue := range b.Enum {
+ actualType := reflect.TypeOf(enumValue)
+ if actualType == nil { // Safeguard
+ continue
+ }
+
+ expectedValue := reflect.ValueOf(data)
+ if expectedValue.IsValid() &&
+ expectedValue.Type().ConvertibleTo(actualType) &&
+ reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), enumValue) {
+ return nil
}
- return errorHelp.sErr(errors.EnumFail(b.Path, b.In, data, b.Enum))
}
- return nil
+
+ return errorHelp.sErr(errors.EnumFail(b.Path, b.In, data, b.Enum), b.Options.recycleResult)
+}
+
+func (b *basicCommonValidator) redeem() {
+ pools.poolOfBasicCommonValidators.RedeemValidator(b)
}
// A HeaderValidator has very limited subset of validations to apply
type HeaderValidator struct {
name string
header *spec.Header
- validators []valueValidator
+ validators [6]valueValidator
KnownFormats strfmt.Registry
+ Options *SchemaValidatorOptions
}
// NewHeaderValidator creates a new header validator object
-func NewHeaderValidator(name string, header *spec.Header, formats strfmt.Registry) *HeaderValidator {
- p := &HeaderValidator{name: name, header: header, KnownFormats: formats}
- p.validators = []valueValidator{
- &typeValidator{
- Type: spec.StringOrArray([]string{header.Type}),
- Nullable: header.Nullable,
- Format: header.Format,
- In: "header",
- Path: name,
- },
+func NewHeaderValidator(name string, header *spec.Header, formats strfmt.Registry, options ...Option) *HeaderValidator {
+ opts := new(SchemaValidatorOptions)
+ for _, o := range options {
+ o(opts)
+ }
+
+ return newHeaderValidator(name, header, formats, opts)
+}
+
+func newHeaderValidator(name string, header *spec.Header, formats strfmt.Registry, opts *SchemaValidatorOptions) *HeaderValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var p *HeaderValidator
+ if opts.recycleValidators {
+ p = pools.poolOfHeaderValidators.BorrowValidator()
+ } else {
+ p = new(HeaderValidator)
+ }
+
+ p.name = name
+ p.header = header
+ p.KnownFormats = formats
+ p.Options = opts
+ p.validators = [6]valueValidator{
+ newTypeValidator(
+ name,
+ "header",
+ spec.StringOrArray([]string{header.Type}),
+ header.Nullable,
+ header.Format,
+ p.Options,
+ ),
p.stringValidator(),
p.formatValidator(),
p.numberValidator(),
p.sliceValidator(),
p.commonValidator(),
}
+
return p
}
// Validate the value of the header against its schema
func (p *HeaderValidator) Validate(data interface{}) *Result {
- result := new(Result)
+ if p.Options.recycleValidators {
+ defer func() {
+ p.redeemChildren()
+ p.redeem()
+ }()
+ }
+
+ if data == nil {
+ return nil
+ }
+
+ var result *Result
+ if p.Options.recycleResult {
+ result = pools.poolOfResults.BorrowResult()
+ } else {
+ result = new(Result)
+ }
+
tpe := reflect.TypeOf(data)
kind := tpe.Kind()
- for _, validator := range p.validators {
- if validator.Applies(p.header, kind) {
- if err := validator.Validate(data); err != nil {
- result.Merge(err)
- if err.HasErrors() {
- return result
+ for idx, validator := range p.validators {
+ if !validator.Applies(p.header, kind) {
+ if p.Options.recycleValidators {
+ // Validate won't be called, so relinquish this validator
+ if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok {
+ redeemableChildren.redeemChildren()
+ }
+ if redeemable, ok := validator.(interface{ redeem() }); ok {
+ redeemable.redeem()
}
+ p.validators[idx] = nil // prevents further (unsafe) usage
}
+
+ continue
+ }
+
+ err := validator.Validate(data)
+ if p.Options.recycleValidators {
+ p.validators[idx] = nil // prevents further (unsafe) usage
+ }
+ if err != nil {
+ if err.HasErrors() {
+ result.Merge(err)
+ break
+ }
+ result.Merge(err)
}
}
- return nil
+
+ return result
}
func (p *HeaderValidator) commonValidator() valueValidator {
- return &basicCommonValidator{
- Path: p.name,
- In: "response",
- Default: p.header.Default,
- Enum: p.header.Enum,
- }
+ return newBasicCommonValidator(
+ p.name,
+ "response",
+ p.header.Default,
+ p.header.Enum,
+ p.Options,
+ )
}
func (p *HeaderValidator) sliceValidator() valueValidator {
- return &basicSliceValidator{
- Path: p.name,
- In: "response",
- Default: p.header.Default,
- MaxItems: p.header.MaxItems,
- MinItems: p.header.MinItems,
- UniqueItems: p.header.UniqueItems,
- Items: p.header.Items,
- Source: p.header,
- KnownFormats: p.KnownFormats,
- }
+ return newBasicSliceValidator(
+ p.name,
+ "response",
+ p.header.Default,
+ p.header.MaxItems,
+ p.header.MinItems,
+ p.header.UniqueItems,
+ p.header.Items,
+ p.header,
+ p.KnownFormats,
+ p.Options,
+ )
}
func (p *HeaderValidator) numberValidator() valueValidator {
- return &numberValidator{
- Path: p.name,
- In: "response",
- Default: p.header.Default,
- MultipleOf: p.header.MultipleOf,
- Maximum: p.header.Maximum,
- ExclusiveMaximum: p.header.ExclusiveMaximum,
- Minimum: p.header.Minimum,
- ExclusiveMinimum: p.header.ExclusiveMinimum,
- Type: p.header.Type,
- Format: p.header.Format,
- }
+ return newNumberValidator(
+ p.name,
+ "response",
+ p.header.Default,
+ p.header.MultipleOf,
+ p.header.Maximum,
+ p.header.ExclusiveMaximum,
+ p.header.Minimum,
+ p.header.ExclusiveMinimum,
+ p.header.Type,
+ p.header.Format,
+ p.Options,
+ )
}
func (p *HeaderValidator) stringValidator() valueValidator {
- return &stringValidator{
- Path: p.name,
- In: "response",
- Default: p.header.Default,
- Required: true,
- MaxLength: p.header.MaxLength,
- MinLength: p.header.MinLength,
- Pattern: p.header.Pattern,
- AllowEmptyValue: false,
- }
+ return newStringValidator(
+ p.name,
+ "response",
+ p.header.Default,
+ true,
+ false,
+ p.header.MaxLength,
+ p.header.MinLength,
+ p.header.Pattern,
+ p.Options,
+ )
}
func (p *HeaderValidator) formatValidator() valueValidator {
- return &formatValidator{
- Path: p.name,
- In: "response",
- //Default: p.header.Default,
- Format: p.header.Format,
- KnownFormats: p.KnownFormats,
+ return newFormatValidator(
+ p.name,
+ "response",
+ p.header.Format,
+ p.KnownFormats,
+ p.Options,
+ )
+}
+
+func (p *HeaderValidator) redeem() {
+ pools.poolOfHeaderValidators.RedeemValidator(p)
+}
+
+func (p *HeaderValidator) redeemChildren() {
+ for idx, validator := range p.validators {
+ if validator == nil {
+ continue
+ }
+ if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok {
+ redeemableChildren.redeemChildren()
+ }
+ if redeemable, ok := validator.(interface{ redeem() }); ok {
+ redeemable.redeem()
+ }
+ p.validators[idx] = nil // free up allocated children if not in pool
}
}
// A ParamValidator has very limited subset of validations to apply
type ParamValidator struct {
param *spec.Parameter
- validators []valueValidator
+ validators [6]valueValidator
KnownFormats strfmt.Registry
+ Options *SchemaValidatorOptions
}
// NewParamValidator creates a new param validator object
-func NewParamValidator(param *spec.Parameter, formats strfmt.Registry) *ParamValidator {
- p := &ParamValidator{param: param, KnownFormats: formats}
- p.validators = []valueValidator{
- &typeValidator{
- Type: spec.StringOrArray([]string{param.Type}),
- Nullable: param.Nullable,
- Format: param.Format,
- In: param.In,
- Path: param.Name,
- },
+func NewParamValidator(param *spec.Parameter, formats strfmt.Registry, options ...Option) *ParamValidator {
+ opts := new(SchemaValidatorOptions)
+ for _, o := range options {
+ o(opts)
+ }
+
+ return newParamValidator(param, formats, opts)
+}
+
+func newParamValidator(param *spec.Parameter, formats strfmt.Registry, opts *SchemaValidatorOptions) *ParamValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var p *ParamValidator
+ if opts.recycleValidators {
+ p = pools.poolOfParamValidators.BorrowValidator()
+ } else {
+ p = new(ParamValidator)
+ }
+
+ p.param = param
+ p.KnownFormats = formats
+ p.Options = opts
+ p.validators = [6]valueValidator{
+ newTypeValidator(
+ param.Name,
+ param.In,
+ spec.StringOrArray([]string{param.Type}),
+ param.Nullable,
+ param.Format,
+ p.Options,
+ ),
p.stringValidator(),
p.formatValidator(),
p.numberValidator(),
p.sliceValidator(),
p.commonValidator(),
}
+
return p
}
// Validate the data against the description of the parameter
func (p *ParamValidator) Validate(data interface{}) *Result {
- result := new(Result)
+ if data == nil {
+ return nil
+ }
+
+ var result *Result
+ if p.Options.recycleResult {
+ result = pools.poolOfResults.BorrowResult()
+ } else {
+ result = new(Result)
+ }
+
tpe := reflect.TypeOf(data)
kind := tpe.Kind()
+ if p.Options.recycleValidators {
+ defer func() {
+ p.redeemChildren()
+ p.redeem()
+ }()
+ }
+
// TODO: validate type
- for _, validator := range p.validators {
- if validator.Applies(p.param, kind) {
- if err := validator.Validate(data); err != nil {
- result.Merge(err)
- if err.HasErrors() {
- return result
+ for idx, validator := range p.validators {
+ if !validator.Applies(p.param, kind) {
+ if p.Options.recycleValidators {
+ // Validate won't be called, so relinquish this validator
+ if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok {
+ redeemableChildren.redeemChildren()
+ }
+ if redeemable, ok := validator.(interface{ redeem() }); ok {
+ redeemable.redeem()
}
+ p.validators[idx] = nil // prevents further (unsafe) usage
}
+
+ continue
+ }
+
+ err := validator.Validate(data)
+ if p.Options.recycleValidators {
+ p.validators[idx] = nil // prevents further (unsafe) usage
+ }
+ if err != nil {
+ if err.HasErrors() {
+ result.Merge(err)
+ break
+ }
+ result.Merge(err)
}
}
- return nil
+
+ return result
}
func (p *ParamValidator) commonValidator() valueValidator {
- return &basicCommonValidator{
- Path: p.param.Name,
- In: p.param.In,
- Default: p.param.Default,
- Enum: p.param.Enum,
- }
+ return newBasicCommonValidator(
+ p.param.Name,
+ p.param.In,
+ p.param.Default,
+ p.param.Enum,
+ p.Options,
+ )
}
func (p *ParamValidator) sliceValidator() valueValidator {
- return &basicSliceValidator{
- Path: p.param.Name,
- In: p.param.In,
- Default: p.param.Default,
- MaxItems: p.param.MaxItems,
- MinItems: p.param.MinItems,
- UniqueItems: p.param.UniqueItems,
- Items: p.param.Items,
- Source: p.param,
- KnownFormats: p.KnownFormats,
- }
+ return newBasicSliceValidator(
+ p.param.Name,
+ p.param.In,
+ p.param.Default,
+ p.param.MaxItems,
+ p.param.MinItems,
+ p.param.UniqueItems,
+ p.param.Items,
+ p.param,
+ p.KnownFormats,
+ p.Options,
+ )
}
func (p *ParamValidator) numberValidator() valueValidator {
- return &numberValidator{
- Path: p.param.Name,
- In: p.param.In,
- Default: p.param.Default,
- MultipleOf: p.param.MultipleOf,
- Maximum: p.param.Maximum,
- ExclusiveMaximum: p.param.ExclusiveMaximum,
- Minimum: p.param.Minimum,
- ExclusiveMinimum: p.param.ExclusiveMinimum,
- Type: p.param.Type,
- Format: p.param.Format,
- }
+ return newNumberValidator(
+ p.param.Name,
+ p.param.In,
+ p.param.Default,
+ p.param.MultipleOf,
+ p.param.Maximum,
+ p.param.ExclusiveMaximum,
+ p.param.Minimum,
+ p.param.ExclusiveMinimum,
+ p.param.Type,
+ p.param.Format,
+ p.Options,
+ )
}
func (p *ParamValidator) stringValidator() valueValidator {
- return &stringValidator{
- Path: p.param.Name,
- In: p.param.In,
- Default: p.param.Default,
- AllowEmptyValue: p.param.AllowEmptyValue,
- Required: p.param.Required,
- MaxLength: p.param.MaxLength,
- MinLength: p.param.MinLength,
- Pattern: p.param.Pattern,
- }
+ return newStringValidator(
+ p.param.Name,
+ p.param.In,
+ p.param.Default,
+ p.param.Required,
+ p.param.AllowEmptyValue,
+ p.param.MaxLength,
+ p.param.MinLength,
+ p.param.Pattern,
+ p.Options,
+ )
}
func (p *ParamValidator) formatValidator() valueValidator {
- return &formatValidator{
- Path: p.param.Name,
- In: p.param.In,
- //Default: p.param.Default,
- Format: p.param.Format,
- KnownFormats: p.KnownFormats,
+ return newFormatValidator(
+ p.param.Name,
+ p.param.In,
+ p.param.Format,
+ p.KnownFormats,
+ p.Options,
+ )
+}
+
+func (p *ParamValidator) redeem() {
+ pools.poolOfParamValidators.RedeemValidator(p)
+}
+
+func (p *ParamValidator) redeemChildren() {
+ for idx, validator := range p.validators {
+ if validator == nil {
+ continue
+ }
+ if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok {
+ redeemableChildren.redeemChildren()
+ }
+ if redeemable, ok := validator.(interface{ redeem() }); ok {
+ redeemable.redeem()
+ }
+ p.validators[idx] = nil // free up allocated children if not in pool
}
}
type basicSliceValidator struct {
- Path string
- In string
- Default interface{}
- MaxItems *int64
- MinItems *int64
- UniqueItems bool
- Items *spec.Items
- Source interface{}
- itemsValidator *itemsValidator
- KnownFormats strfmt.Registry
+ Path string
+ In string
+ Default interface{}
+ MaxItems *int64
+ MinItems *int64
+ UniqueItems bool
+ Items *spec.Items
+ Source interface{}
+ KnownFormats strfmt.Registry
+ Options *SchemaValidatorOptions
+}
+
+func newBasicSliceValidator(
+ path, in string,
+ def interface{}, maxItems, minItems *int64, uniqueItems bool, items *spec.Items,
+ source interface{}, formats strfmt.Registry,
+ opts *SchemaValidatorOptions) *basicSliceValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var s *basicSliceValidator
+ if opts.recycleValidators {
+ s = pools.poolOfBasicSliceValidators.BorrowValidator()
+ } else {
+ s = new(basicSliceValidator)
+ }
+
+ s.Path = path
+ s.In = in
+ s.Default = def
+ s.MaxItems = maxItems
+ s.MinItems = minItems
+ s.UniqueItems = uniqueItems
+ s.Items = items
+ s.Source = source
+ s.KnownFormats = formats
+ s.Options = opts
+
+ return s
}
func (s *basicSliceValidator) SetPath(path string) {
@@ -411,60 +733,61 @@ func (s *basicSliceValidator) Applies(source interface{}, kind reflect.Kind) boo
switch source.(type) {
case *spec.Parameter, *spec.Items, *spec.Header:
return kind == reflect.Slice
+ default:
+ return false
}
- return false
}
func (s *basicSliceValidator) Validate(data interface{}) *Result {
+ if s.Options.recycleValidators {
+ defer func() {
+ s.redeem()
+ }()
+ }
val := reflect.ValueOf(data)
size := int64(val.Len())
if s.MinItems != nil {
if err := MinItems(s.Path, s.In, size, *s.MinItems); err != nil {
- return errorHelp.sErr(err)
+ return errorHelp.sErr(err, s.Options.recycleResult)
}
}
if s.MaxItems != nil {
if err := MaxItems(s.Path, s.In, size, *s.MaxItems); err != nil {
- return errorHelp.sErr(err)
+ return errorHelp.sErr(err, s.Options.recycleResult)
}
}
if s.UniqueItems {
if err := UniqueItems(s.Path, s.In, data); err != nil {
- return errorHelp.sErr(err)
+ return errorHelp.sErr(err, s.Options.recycleResult)
}
}
- if s.itemsValidator == nil && s.Items != nil {
- s.itemsValidator = newItemsValidator(s.Path, s.In, s.Items, s.Source, s.KnownFormats)
+ if s.Items == nil {
+ return nil
}
- if s.itemsValidator != nil {
- for i := 0; i < int(size); i++ {
- ele := val.Index(i)
- if err := s.itemsValidator.Validate(i, ele.Interface()); err != nil && err.HasErrors() {
+ for i := 0; i < int(size); i++ {
+ itemsValidator := newItemsValidator(s.Path, s.In, s.Items, s.Source, s.KnownFormats, s.Options)
+ ele := val.Index(i)
+ if err := itemsValidator.Validate(i, ele.Interface()); err != nil {
+ if err.HasErrors() {
return err
}
+ if err.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(err)
+ }
}
}
+
return nil
}
-/* unused
-func (s *basicSliceValidator) hasDuplicates(value reflect.Value, size int) bool {
- dict := make(map[interface{}]struct{})
- for i := 0; i < size; i++ {
- ele := value.Index(i)
- if _, ok := dict[ele.Interface()]; ok {
- return true
- }
- dict[ele.Interface()] = struct{}{}
- }
- return false
+func (s *basicSliceValidator) redeem() {
+ pools.poolOfBasicSliceValidators.RedeemValidator(s)
}
-*/
type numberValidator struct {
Path string
@@ -476,8 +799,40 @@ type numberValidator struct {
Minimum *float64
ExclusiveMinimum bool
// Allows for more accurate behavior regarding integers
- Type string
- Format string
+ Type string
+ Format string
+ Options *SchemaValidatorOptions
+}
+
+func newNumberValidator(
+ path, in string, def interface{},
+ multipleOf, maximum *float64, exclusiveMaximum bool, minimum *float64, exclusiveMinimum bool,
+ typ, format string,
+ opts *SchemaValidatorOptions) *numberValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var n *numberValidator
+ if opts.recycleValidators {
+ n = pools.poolOfNumberValidators.BorrowValidator()
+ } else {
+ n = new(numberValidator)
+ }
+
+ n.Path = path
+ n.In = in
+ n.Default = def
+ n.MultipleOf = multipleOf
+ n.Maximum = maximum
+ n.ExclusiveMaximum = exclusiveMaximum
+ n.Minimum = minimum
+ n.ExclusiveMinimum = exclusiveMinimum
+ n.Type = typ
+ n.Format = format
+ n.Options = opts
+
+ return n
}
func (n *numberValidator) SetPath(path string) {
@@ -489,12 +844,10 @@ func (n *numberValidator) Applies(source interface{}, kind reflect.Kind) bool {
case *spec.Parameter, *spec.Schema, *spec.Items, *spec.Header:
isInt := kind >= reflect.Int && kind <= reflect.Uint64
isFloat := kind == reflect.Float32 || kind == reflect.Float64
- r := isInt || isFloat
- debugLog("schema props validator for %q applies %t for %T (kind: %v) isInt=%t, isFloat=%t\n", n.Path, r, source, kind, isInt, isFloat)
- return r
+ return isInt || isFloat
+ default:
+ return false
}
- debugLog("schema props validator for %q applies %t for %T (kind: %v)\n", n.Path, false, source, kind)
- return false
}
// Validate provides a validator for generic JSON numbers,
@@ -519,11 +872,18 @@ func (n *numberValidator) Applies(source interface{}, kind reflect.Kind) bool {
//
// TODO: default boundaries with MAX_SAFE_INTEGER are not checked (specific to json.Number?)
func (n *numberValidator) Validate(val interface{}) *Result {
- res := new(Result)
+ if n.Options.recycleValidators {
+ defer func() {
+ n.redeem()
+ }()
+ }
- resMultiple := new(Result)
- resMinimum := new(Result)
- resMaximum := new(Result)
+ var res, resMultiple, resMinimum, resMaximum *Result
+ if n.Options.recycleResult {
+ res = pools.poolOfResults.BorrowResult()
+ } else {
+ res = new(Result)
+ }
// Used only to attempt to validate constraint on value,
// even though value or constraint specified do not match type and format
@@ -533,68 +893,106 @@ func (n *numberValidator) Validate(val interface{}) *Result {
res.AddErrors(IsValueValidAgainstRange(val, n.Type, n.Format, "Checked", n.Path))
if n.MultipleOf != nil {
+ resMultiple = pools.poolOfResults.BorrowResult()
+
// Is the constraint specifier within the range of the specific numeric type and format?
resMultiple.AddErrors(IsValueValidAgainstRange(*n.MultipleOf, n.Type, n.Format, "MultipleOf", n.Path))
if resMultiple.IsValid() {
// Constraint validated with compatible types
if err := MultipleOfNativeType(n.Path, n.In, val, *n.MultipleOf); err != nil {
- resMultiple.Merge(errorHelp.sErr(err))
+ resMultiple.Merge(errorHelp.sErr(err, n.Options.recycleResult))
}
} else {
// Constraint nevertheless validated, converted as general number
if err := MultipleOf(n.Path, n.In, data, *n.MultipleOf); err != nil {
- resMultiple.Merge(errorHelp.sErr(err))
+ resMultiple.Merge(errorHelp.sErr(err, n.Options.recycleResult))
}
}
}
- // nolint: dupl
if n.Maximum != nil {
+ resMaximum = pools.poolOfResults.BorrowResult()
+
// Is the constraint specifier within the range of the specific numeric type and format?
resMaximum.AddErrors(IsValueValidAgainstRange(*n.Maximum, n.Type, n.Format, "Maximum boundary", n.Path))
if resMaximum.IsValid() {
// Constraint validated with compatible types
if err := MaximumNativeType(n.Path, n.In, val, *n.Maximum, n.ExclusiveMaximum); err != nil {
- resMaximum.Merge(errorHelp.sErr(err))
+ resMaximum.Merge(errorHelp.sErr(err, n.Options.recycleResult))
}
} else {
// Constraint nevertheless validated, converted as general number
if err := Maximum(n.Path, n.In, data, *n.Maximum, n.ExclusiveMaximum); err != nil {
- resMaximum.Merge(errorHelp.sErr(err))
+ resMaximum.Merge(errorHelp.sErr(err, n.Options.recycleResult))
}
}
}
- // nolint: dupl
if n.Minimum != nil {
+ resMinimum = pools.poolOfResults.BorrowResult()
+
// Is the constraint specifier within the range of the specific numeric type and format?
resMinimum.AddErrors(IsValueValidAgainstRange(*n.Minimum, n.Type, n.Format, "Minimum boundary", n.Path))
if resMinimum.IsValid() {
// Constraint validated with compatible types
if err := MinimumNativeType(n.Path, n.In, val, *n.Minimum, n.ExclusiveMinimum); err != nil {
- resMinimum.Merge(errorHelp.sErr(err))
+ resMinimum.Merge(errorHelp.sErr(err, n.Options.recycleResult))
}
} else {
// Constraint nevertheless validated, converted as general number
if err := Minimum(n.Path, n.In, data, *n.Minimum, n.ExclusiveMinimum); err != nil {
- resMinimum.Merge(errorHelp.sErr(err))
+ resMinimum.Merge(errorHelp.sErr(err, n.Options.recycleResult))
}
}
}
res.Merge(resMultiple, resMinimum, resMaximum)
res.Inc()
+
return res
}
+func (n *numberValidator) redeem() {
+ pools.poolOfNumberValidators.RedeemValidator(n)
+}
+
type stringValidator struct {
+ Path string
+ In string
Default interface{}
Required bool
AllowEmptyValue bool
MaxLength *int64
MinLength *int64
Pattern string
- Path string
- In string
+ Options *SchemaValidatorOptions
+}
+
+func newStringValidator(
+ path, in string,
+ def interface{}, required, allowEmpty bool, maxLength, minLength *int64, pattern string,
+ opts *SchemaValidatorOptions) *stringValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var s *stringValidator
+ if opts.recycleValidators {
+ s = pools.poolOfStringValidators.BorrowValidator()
+ } else {
+ s = new(stringValidator)
+ }
+
+ s.Path = path
+ s.In = in
+ s.Default = def
+ s.Required = required
+ s.AllowEmptyValue = allowEmpty
+ s.MaxLength = maxLength
+ s.MinLength = minLength
+ s.Pattern = pattern
+ s.Options = opts
+
+ return s
}
func (s *stringValidator) SetPath(path string) {
@@ -604,42 +1002,50 @@ func (s *stringValidator) SetPath(path string) {
func (s *stringValidator) Applies(source interface{}, kind reflect.Kind) bool {
switch source.(type) {
case *spec.Parameter, *spec.Schema, *spec.Items, *spec.Header:
- r := kind == reflect.String
- debugLog("string validator for %q applies %t for %T (kind: %v)\n", s.Path, r, source, kind)
- return r
+ return kind == reflect.String
+ default:
+ return false
}
- debugLog("string validator for %q applies %t for %T (kind: %v)\n", s.Path, false, source, kind)
- return false
}
func (s *stringValidator) Validate(val interface{}) *Result {
+ if s.Options.recycleValidators {
+ defer func() {
+ s.redeem()
+ }()
+ }
+
data, ok := val.(string)
if !ok {
- return errorHelp.sErr(errors.InvalidType(s.Path, s.In, stringType, val))
+ return errorHelp.sErr(errors.InvalidType(s.Path, s.In, stringType, val), s.Options.recycleResult)
}
if s.Required && !s.AllowEmptyValue && (s.Default == nil || s.Default == "") {
if err := RequiredString(s.Path, s.In, data); err != nil {
- return errorHelp.sErr(err)
+ return errorHelp.sErr(err, s.Options.recycleResult)
}
}
if s.MaxLength != nil {
if err := MaxLength(s.Path, s.In, data, *s.MaxLength); err != nil {
- return errorHelp.sErr(err)
+ return errorHelp.sErr(err, s.Options.recycleResult)
}
}
if s.MinLength != nil {
if err := MinLength(s.Path, s.In, data, *s.MinLength); err != nil {
- return errorHelp.sErr(err)
+ return errorHelp.sErr(err, s.Options.recycleResult)
}
}
if s.Pattern != "" {
if err := Pattern(s.Path, s.In, data, s.Pattern); err != nil {
- return errorHelp.sErr(err)
+ return errorHelp.sErr(err, s.Options.recycleResult)
}
}
return nil
}
+
+func (s *stringValidator) redeem() {
+ pools.poolOfStringValidators.RedeemValidator(s)
+}
diff --git a/test/tools/vendor/github.com/go-openapi/validate/values.go b/test/tools/vendor/github.com/go-openapi/validate/values.go
index e7ad8c10336..5f6f5ee61e5 100644
--- a/test/tools/vendor/github.com/go-openapi/validate/values.go
+++ b/test/tools/vendor/github.com/go-openapi/validate/values.go
@@ -120,7 +120,7 @@ func UniqueItems(path, in string, data interface{}) *errors.Validation {
// MinLength validates a string for minimum length
func MinLength(path, in, data string, minLength int64) *errors.Validation {
- strLen := int64(utf8.RuneCount([]byte(data)))
+ strLen := int64(utf8.RuneCountInString(data))
if strLen < minLength {
return errors.TooShort(path, in, minLength, data)
}
@@ -129,7 +129,7 @@ func MinLength(path, in, data string, minLength int64) *errors.Validation {
// MaxLength validates a string for maximum length
func MaxLength(path, in, data string, maxLength int64) *errors.Validation {
- strLen := int64(utf8.RuneCount([]byte(data)))
+ strLen := int64(utf8.RuneCountInString(data))
if strLen > maxLength {
return errors.TooLong(path, in, maxLength, data)
}
@@ -315,7 +315,7 @@ func FormatOf(path, in, format, data string, registry strfmt.Registry) *errors.V
// TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free
func MaximumNativeType(path, in string, val interface{}, max float64, exclusive bool) *errors.Validation {
kind := reflect.ValueOf(val).Type().Kind()
- switch kind {
+ switch kind { //nolint:exhaustive
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
value := valueHelp.asInt64(val)
return MaximumInt(path, in, value, int64(max), exclusive)
@@ -345,7 +345,7 @@ func MaximumNativeType(path, in string, val interface{}, max float64, exclusive
// TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free
func MinimumNativeType(path, in string, val interface{}, min float64, exclusive bool) *errors.Validation {
kind := reflect.ValueOf(val).Type().Kind()
- switch kind {
+ switch kind { //nolint:exhaustive
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
value := valueHelp.asInt64(val)
return MinimumInt(path, in, value, int64(min), exclusive)
@@ -375,7 +375,7 @@ func MinimumNativeType(path, in string, val interface{}, min float64, exclusive
// TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free
func MultipleOfNativeType(path, in string, val interface{}, multipleOf float64) *errors.Validation {
kind := reflect.ValueOf(val).Type().Kind()
- switch kind {
+ switch kind { //nolint:exhaustive
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
value := valueHelp.asInt64(val)
return MultipleOfInt(path, in, value, int64(multipleOf))
@@ -399,7 +399,7 @@ func IsValueValidAgainstRange(val interface{}, typeName, format, prefix, path st
// What is the string representation of val
var stringRep string
- switch kind {
+ switch kind { //nolint:exhaustive
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
stringRep = swag.FormatUint64(valueHelp.asUint64(val))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/diff.go b/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/diff.go
index 5999f494856..2faba2537af 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/diff.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/diff.go
@@ -2,13 +2,12 @@ package commands
import (
"encoding/json"
+ "errors"
"fmt"
"io"
"log"
"os"
- "errors"
-
"github.com/go-openapi/loads"
"github.com/go-swagger/go-swagger/cmd/swagger/commands/diff"
)
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/diff/checks.go b/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/diff/checks.go
index 2ae1b8227e6..627bc5f7b20 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/diff/checks.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/diff/checks.go
@@ -42,8 +42,8 @@ func CompareProperties(location DifferenceLocation, schema1 *spec.Schema, schema
schema1Props := propertiesFor(schema1, getRefFn1)
schema2Props := propertiesFor(schema2, getRefFn2)
- // find deleted and changed properties
+ // find deleted and changed properties
for eachProp1Name, eachProp1 := range schema1Props {
eachProp1 := eachProp1
childLoc := addChildDiffNode(location, eachProp1Name, eachProp1.Schema)
@@ -66,7 +66,13 @@ func CompareProperties(location DifferenceLocation, schema1 *spec.Schema, schema
eachProp2 := eachProp2
if _, ok := schema1.Properties[eachProp2Name]; !ok {
childLoc := addChildDiffNode(location, eachProp2Name, &eachProp2)
- propDiffs = append(propDiffs, SpecDifference{DifferenceLocation: childLoc, Code: AddedProperty})
+
+ analyzedProp2 := schema2Props[eachProp2Name]
+ if analyzedProp2.Required {
+ propDiffs = append(propDiffs, SpecDifference{DifferenceLocation: childLoc, Code: AddedRequiredProperty})
+ } else {
+ propDiffs = append(propDiffs, SpecDifference{DifferenceLocation: childLoc, Code: AddedProperty})
+ }
}
}
return propDiffs
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/diff/compatibility.go b/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/diff/compatibility.go
index d31c0e63ae4..5be29d86753 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/diff/compatibility.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/diff/compatibility.go
@@ -36,11 +36,12 @@ func init() {
AddedConstraint: NonBreaking,
DeletedExtension: Warning,
AddedExtension: Warning,
+ ChangedExtensionValue: Warning,
},
ForRequest: map[SpecChangeCode]Compatibility{
AddedRequiredProperty: Breaking,
DeletedProperty: Breaking,
- AddedProperty: Breaking,
+ AddedProperty: NonBreaking,
AddedOptionalParam: NonBreaking,
AddedRequiredParam: Breaking,
DeletedOptionalParam: NonBreaking,
@@ -70,6 +71,7 @@ func init() {
ChangedCollectionFormat: Breaking,
DeletedExtension: Warning,
AddedExtension: Warning,
+ ChangedExtensionValue: Warning,
},
ForChange: map[SpecChangeCode]Compatibility{
NoChangeDetected: NonBreaking,
@@ -96,6 +98,7 @@ func init() {
DeletedDefinition: NonBreaking,
DeletedExtension: Warning,
AddedExtension: Warning,
+ ChangedExtensionValue: Warning,
},
}
}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/diff/difftypes.go b/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/diff/difftypes.go
index 007862fb9b5..3d3d5a1c150 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/diff/difftypes.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/diff/difftypes.go
@@ -117,6 +117,8 @@ const (
DeletedExtension
// AddedExtension added an extension
AddedExtension
+ // ChangedExtensionValue changed an extension value
+ ChangedExtensionValue
)
var toLongStringSpecChangeCode = map[SpecChangeCode]string{
@@ -173,6 +175,7 @@ var toLongStringSpecChangeCode = map[SpecChangeCode]string{
ChangedCollectionFormat: "Changed collection format",
DeletedExtension: "Deleted Extension",
AddedExtension: "Added Extension",
+ ChangedExtensionValue: "Changed Extension Value",
}
var toStringSpecChangeCode = map[SpecChangeCode]string{
@@ -229,6 +232,7 @@ var toStringSpecChangeCode = map[SpecChangeCode]string{
ChangedCollectionFormat: "ChangedCollectionFormat",
DeletedExtension: "DeletedExtension",
AddedExtension: "AddedExtension",
+ ChangedExtensionValue: "ChangedExtensionValue",
}
var toIDSpecChangeCode = map[string]SpecChangeCode{}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/diff/spec_analyser.go b/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/diff/spec_analyser.go
index 8df44aeb283..655af146590 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/diff/spec_analyser.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/diff/spec_analyser.go
@@ -2,6 +2,7 @@ package diff
import (
"fmt"
+ "reflect"
"strings"
"github.com/go-openapi/spec"
@@ -230,7 +231,15 @@ func (sd *SpecAnalyser) analyseResponseParams() {
// deleted responses
for code1 := range op1Responses {
if _, ok := op2Responses[code1]; !ok {
- location := DifferenceLocation{URL: eachURLMethodFrom2.Path, Method: eachURLMethodFrom2.Method, Response: code1, Node: getSchemaDiffNode("Body", op1Responses[code1].Schema)}
+ location := DifferenceLocation{
+ URL: eachURLMethodFrom2.Path,
+ Method: eachURLMethodFrom2.Method,
+ Response: code1,
+ Node: getNameOnlyDiffNode("NoContent"),
+ }
+ if op1Responses[code1].Schema != nil {
+ location.Node = getSchemaDiffNode("Body", op1Responses[code1].Schema)
+ }
sd.Diffs = sd.Diffs.addDiff(SpecDifference{DifferenceLocation: location, Code: DeletedResponse})
}
}
@@ -272,11 +281,22 @@ func (sd *SpecAnalyser) analyseResponseParams() {
sd.compareDescripton(responseLocation, op1Response.Description, op2Response.Description)
if op1Response.Schema != nil {
- sd.compareSchema(
- DifferenceLocation{URL: eachURLMethodFrom2.Path, Method: eachURLMethodFrom2.Method, Response: code2, Node: getSchemaDiffNode("Body", op1Response.Schema)},
- op1Response.Schema,
- op2Response.Schema)
+ if op2Response.Schema == nil {
+ sd.Diffs = sd.Diffs.addDiff(SpecDifference{
+ DifferenceLocation: DifferenceLocation{URL: eachURLMethodFrom2.Path, Method: eachURLMethodFrom2.Method, Response: code2, Node: getSchemaDiffNode("Body", op1Response.Schema)},
+ Code: DeletedProperty})
+ } else {
+ sd.compareSchema(
+ DifferenceLocation{URL: eachURLMethodFrom2.Path, Method: eachURLMethodFrom2.Method, Response: code2, Node: getSchemaDiffNode("Body", op1Response.Schema)},
+ op1Response.Schema,
+ op2Response.Schema)
+ }
+ } else if op2Response.Schema != nil {
+ sd.Diffs = sd.Diffs.addDiff(SpecDifference{
+ DifferenceLocation: DifferenceLocation{URL: eachURLMethodFrom2.Path, Method: eachURLMethodFrom2.Method, Response: code2, Node: getSchemaDiffNode("Body", op2Response.Schema)},
+ Code: AddedProperty})
}
+
} else {
// op2Response
sd.Diffs = sd.Diffs.addDiff(SpecDifference{
@@ -293,6 +313,7 @@ func (sd *SpecAnalyser) analyseExtensions(spec1, spec2 *spec.Swagger) {
specLoc := DifferenceLocation{Node: &Node{Field: "Spec"}}
sd.checkAddedExtensions(spec1.Extensions, spec2.Extensions, specLoc, "")
sd.checkDeletedExtensions(spec1.Extensions, spec2.Extensions, specLoc, "")
+ sd.checkChangedExtensions(spec1.Extensions, spec2.Extensions, specLoc, "")
sd.analyzeInfoExtensions()
sd.analyzeTagExtensions(spec1, spec2)
@@ -302,19 +323,27 @@ func (sd *SpecAnalyser) analyseExtensions(spec1, spec2 *spec.Swagger) {
}
func (sd *SpecAnalyser) analyzeOperationExtensions() {
+ pathsIterated := make(map[string]struct{})
for urlMethod, op2 := range sd.urlMethods2 {
pathAndMethodLoc := DifferenceLocation{URL: urlMethod.Path, Method: urlMethod.Method}
if op1, ok := sd.urlMethods1[urlMethod]; ok {
- sd.checkAddedExtensions(op1.Extensions, op2.Extensions, DifferenceLocation{URL: urlMethod.Path}, "")
+ if _, ok := pathsIterated[urlMethod.Path]; !ok {
+ sd.checkAddedExtensions(op1.Extensions, op2.Extensions, DifferenceLocation{URL: urlMethod.Path}, "")
+ sd.checkChangedExtensions(op1.Extensions, op2.Extensions, DifferenceLocation{URL: urlMethod.Path}, "")
+ pathsIterated[urlMethod.Path] = struct{}{}
+ }
sd.checkAddedExtensions(op1.Operation.Responses.Extensions, op2.Operation.Responses.Extensions, pathAndMethodLoc, "Responses")
+ sd.checkChangedExtensions(op1.Operation.Responses.Extensions, op2.Operation.Responses.Extensions, pathAndMethodLoc, "Responses")
sd.checkAddedExtensions(op1.Operation.Extensions, op2.Operation.Extensions, pathAndMethodLoc, "")
-
+ sd.checkChangedExtensions(op1.Operation.Extensions, op2.Operation.Extensions, pathAndMethodLoc, "")
+ sd.checkParamExtensions(op1, op2, urlMethod)
for code, resp := range op1.Operation.Responses.StatusCodeResponses {
for hdr, h := range resp.Headers {
op2StatusCode, ok := op2.Operation.Responses.StatusCodeResponses[code]
if ok {
if _, ok = op2StatusCode.Headers[hdr]; ok {
sd.checkAddedExtensions(h.Extensions, op2StatusCode.Headers[hdr].Extensions, DifferenceLocation{URL: urlMethod.Path, Method: urlMethod.Method, Node: getNameOnlyDiffNode("Headers")}, hdr)
+ sd.checkChangedExtensions(h.Extensions, op2StatusCode.Headers[hdr].Extensions, DifferenceLocation{URL: urlMethod.Path, Method: urlMethod.Method, Node: getNameOnlyDiffNode("Headers")}, hdr)
}
}
}
@@ -326,10 +355,14 @@ func (sd *SpecAnalyser) analyzeOperationExtensions() {
}
}
+ pathsIterated = make(map[string]struct{})
for urlMethod, op1 := range sd.urlMethods1 {
pathAndMethodLoc := DifferenceLocation{URL: urlMethod.Path, Method: urlMethod.Method}
if op2, ok := sd.urlMethods2[urlMethod]; ok {
- sd.checkDeletedExtensions(op1.Extensions, op2.Extensions, DifferenceLocation{URL: urlMethod.Path}, "")
+ if _, ok := pathsIterated[urlMethod.Path]; !ok {
+ sd.checkDeletedExtensions(op1.Extensions, op2.Extensions, DifferenceLocation{URL: urlMethod.Path}, "")
+ pathsIterated[urlMethod.Path] = struct{}{}
+ }
sd.checkDeletedExtensions(op1.Operation.Responses.Extensions, op2.Operation.Responses.Extensions, pathAndMethodLoc, "Responses")
sd.checkDeletedExtensions(op1.Operation.Extensions, op2.Operation.Extensions, pathAndMethodLoc, "")
for code, resp := range op1.Operation.Responses.StatusCodeResponses {
@@ -346,11 +379,42 @@ func (sd *SpecAnalyser) analyzeOperationExtensions() {
}
}
+func (sd *SpecAnalyser) checkParamExtensions(op1 *PathItemOp, op2 *PathItemOp, urlMethod URLMethod) {
+ locations := []string{"query", "path", "body", "header", "formData"}
+ titles := []string{"Query", "Path", "Body", "Header", "FormData"}
+
+ for i, paramLocation := range locations {
+ rootNode := getNameOnlyDiffNode(titles[i])
+ params1 := getParams(op1.ParentPathItem.Parameters, op1.Operation.Parameters, paramLocation)
+ params2 := getParams(op2.ParentPathItem.Parameters, op2.Operation.Parameters, paramLocation)
+
+ location := DifferenceLocation{URL: urlMethod.Path, Method: urlMethod.Method, Node: rootNode}
+ // detect deleted param extensions
+ for paramName1, param1 := range params1 {
+ if param2, ok := params2[paramName1]; ok {
+ childLocation := location.AddNode(getSchemaDiffNode(paramName1, ¶m1.SimpleSchema))
+ sd.checkDeletedExtensions(param1.Extensions, param2.Extensions, childLocation, "")
+ }
+ }
+
+ // detect added changed params
+ for paramName2, param2 := range params2 {
+ // changed?
+ if param1, ok := params1[paramName2]; ok {
+ childLocation := location.AddNode(getSchemaDiffNode(paramName2, ¶m1.SimpleSchema))
+ sd.checkAddedExtensions(param1.Extensions, param2.Extensions, childLocation, "")
+ sd.checkChangedExtensions(param1.Extensions, param2.Extensions, childLocation, "")
+ }
+ }
+ }
+}
+
func (sd *SpecAnalyser) analyzeSecurityDefinitionExtensions(spec1 *spec.Swagger, spec2 *spec.Swagger) {
securityDefLoc := DifferenceLocation{Node: &Node{Field: "Security Definitions"}}
- for key, securityDef := range spec1.SecurityDefinitions {
+ for key, securityDef1 := range spec1.SecurityDefinitions {
if securityDef2, ok := spec2.SecurityDefinitions[key]; ok {
- sd.checkAddedExtensions(securityDef.Extensions, securityDef2.Extensions, securityDefLoc, "")
+ sd.checkAddedExtensions(securityDef1.Extensions, securityDef2.Extensions, securityDefLoc, "")
+ sd.checkChangedExtensions(securityDef1.Extensions, securityDef2.Extensions, securityDefLoc, "")
}
}
@@ -365,6 +429,7 @@ func (sd *SpecAnalyser) analyzeSchemaExtensions(schema1, schema2 *spec.Schema, c
if schema1 != nil && schema2 != nil {
diffLoc := DifferenceLocation{Response: code, URL: urlMethod.Path, Method: urlMethod.Method, Node: getSchemaDiffNode("Body", schema2)}
sd.checkAddedExtensions(schema1.Extensions, schema2.Extensions, diffLoc, "")
+ sd.checkChangedExtensions(schema1.Extensions, schema2.Extensions, diffLoc, "")
sd.checkDeletedExtensions(schema1.Extensions, schema2.Extensions, diffLoc, "")
if schema1.Items != nil && schema2.Items != nil {
sd.analyzeSchemaExtensions(schema1.Items.Schema, schema2.Items.Schema, code, urlMethod)
@@ -384,15 +449,18 @@ func (sd *SpecAnalyser) analyzeInfoExtensions() {
diffLocation := DifferenceLocation{Node: &Node{Field: "Spec Info"}}
sd.checkAddedExtensions(sd.Info1.Extensions, sd.Info2.Extensions, diffLocation, "")
sd.checkDeletedExtensions(sd.Info1.Extensions, sd.Info2.Extensions, diffLocation, "")
+ sd.checkChangedExtensions(sd.Info1.Extensions, sd.Info2.Extensions, diffLocation, "")
if sd.Info1.Contact != nil && sd.Info2.Contact != nil {
diffLocation = DifferenceLocation{Node: &Node{Field: "Spec Info.Contact"}}
sd.checkAddedExtensions(sd.Info1.Contact.Extensions, sd.Info2.Contact.Extensions, diffLocation, "")
sd.checkDeletedExtensions(sd.Info1.Contact.Extensions, sd.Info2.Contact.Extensions, diffLocation, "")
+ sd.checkChangedExtensions(sd.Info1.Contact.Extensions, sd.Info2.Contact.Extensions, diffLocation, "")
}
if sd.Info1.License != nil && sd.Info2.License != nil {
diffLocation = DifferenceLocation{Node: &Node{Field: "Spec Info.License"}}
sd.checkAddedExtensions(sd.Info1.License.Extensions, sd.Info2.License.Extensions, diffLocation, "")
sd.checkDeletedExtensions(sd.Info1.License.Extensions, sd.Info2.License.Extensions, diffLocation, "")
+ sd.checkChangedExtensions(sd.Info1.License.Extensions, sd.Info2.License.Extensions, diffLocation, "")
}
}
}
@@ -403,6 +471,7 @@ func (sd *SpecAnalyser) analyzeTagExtensions(spec1 *spec.Swagger, spec2 *spec.Sw
for _, spec1Tag := range spec1.Tags {
if spec2Tag.Name == spec1Tag.Name {
sd.checkAddedExtensions(spec1Tag.Extensions, spec2Tag.Extensions, diffLocation, "")
+ sd.checkChangedExtensions(spec1Tag.Extensions, spec2Tag.Extensions, diffLocation, "")
}
}
}
@@ -430,6 +499,21 @@ func (sd *SpecAnalyser) checkAddedExtensions(extensions1 spec.Extensions, extens
}
}
+func (sd *SpecAnalyser) checkChangedExtensions(extensions1 spec.Extensions, extensions2 spec.Extensions, diffLocation DifferenceLocation, fieldPrefix string) {
+ for extKey, ext2Val := range extensions2 {
+ if ext1Val, ok := extensions1[extKey]; ok && !reflect.DeepEqual(ext1Val, ext2Val) {
+ if fieldPrefix != "" {
+ extKey = fmt.Sprintf("%s.%s", fieldPrefix, extKey)
+ }
+ sd.Diffs = sd.Diffs.addDiff(SpecDifference{
+ DifferenceLocation: diffLocation.AddNode(&Node{Field: extKey}),
+ Code: ChangedExtensionValue,
+ Compatibility: Warning, // this could potentially be a breaking change
+ })
+ }
+ }
+}
+
func (sd *SpecAnalyser) checkDeletedExtensions(extensions1 spec.Extensions, extensions2 spec.Extensions, diffLocation DifferenceLocation, fieldPrefix string) {
for extKey := range extensions1 {
if _, ok := extensions2[extKey]; !ok {
@@ -746,7 +830,11 @@ func (sd *SpecAnalyser) schemaFromRef(ref spec.Ref, defns *spec.Definitions) (ac
}
func schemaLocationKey(location DifferenceLocation) string {
- return location.Method + location.URL + location.Node.Field + location.Node.TypeName
+ k := location.Method + location.URL + location.Node.Field + location.Node.TypeName
+ if location.Node.ChildNode != nil && location.Node.ChildNode.IsArray {
+ k += location.Node.ChildNode.Field + location.Node.ChildNode.TypeName
+ }
+ return k
}
// PropertyDefn combines a property with its required-ness
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/expand.go b/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/expand.go
index d8a70467338..ed1fd3bfa22 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/expand.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/expand.go
@@ -63,9 +63,10 @@ func writeToFile(swspec *spec.Swagger, pretty bool, format string, output string
}
var bb interface{}
bb, err = data.MarshalYAML()
- b = bb.([]byte)
+ if err == nil {
+ b = bb.([]byte)
+ }
}
-
}
if err != nil {
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/generate/model.go b/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/generate/model.go
index fb8c14268d8..5dffa66eaf4 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/generate/model.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/generate/model.go
@@ -29,6 +29,7 @@ type modelOptions struct {
KeepSpecOrder bool `long:"keep-spec-order" description:"keep schema properties order identical to spec file"`
AllDefinitions bool `long:"all-definitions" description:"generate all model definitions regardless of usage in operations" hidden:"deprecated"`
StructTags []string `long:"struct-tags" description:"the struct tags to generate, repeat for multiple (defaults to json)"`
+ RootedErrorPath bool `long:"rooted-error-path" description:"extends validation errors with the type name instead of an empty path, in the case of arrays and maps"`
}
func (mo modelOptions) apply(opts *generator.GenOpts) {
@@ -39,6 +40,7 @@ func (mo modelOptions) apply(opts *generator.GenOpts) {
opts.PropertiesSpecOrder = mo.KeepSpecOrder
opts.IgnoreOperations = mo.AllDefinitions
opts.StructTags = mo.StructTags
+ opts.WantsRootedErrorPath = mo.RootedErrorPath
}
// WithModels adds the model options group.
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/generate/shared.go b/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/generate/shared.go
index ab9725a7c12..7eb3af3fa15 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/generate/shared.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/generate/shared.go
@@ -17,7 +17,7 @@ import (
// FlattenCmdOptions determines options to the flatten spec preprocessing
type FlattenCmdOptions struct {
WithExpand bool `long:"with-expand" description:"expands all $ref's in spec prior to generation (shorthand to --with-flatten=expand)" group:"shared"`
- WithFlatten []string `long:"with-flatten" description:"flattens all $ref's in spec prior to generation" choice:"minimal" choice:"full" choice:"expand" choice:"verbose" choice:"noverbose" choice:"remove-unused" default:"minimal" default:"verbose" group:"shared"` // nolint: staticcheck
+ WithFlatten []string `long:"with-flatten" description:"flattens all $ref's in spec prior to generation" choice:"minimal" choice:"full" choice:"expand" choice:"verbose" choice:"noverbose" choice:"remove-unused" choice:"keep-names" default:"minimal" default:"verbose" group:"shared"`
}
// SetFlattenOptions builds flatten options from command line args
@@ -64,6 +64,8 @@ func (f *FlattenCmdOptions) SetFlattenOptions(dflt *analysis.FlattenOpts) (res *
res.Minimal = true
minimalIsSet = true
}
+ case "keep-names":
+ res.KeepNames = true
}
}
return
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/generate/spec.go b/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/generate/spec.go
index 3e16789b6ca..58f6a945bdb 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/generate/spec.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/generate/spec.go
@@ -1,6 +1,3 @@
-//go:build !go1.11
-// +build !go1.11
-
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -23,44 +20,54 @@ import (
"os"
"strings"
+ "github.com/go-swagger/go-swagger/codescan"
+
"github.com/go-openapi/loads"
"github.com/go-openapi/spec"
- "github.com/go-swagger/go-swagger/scan"
"github.com/jessevdk/go-flags"
"gopkg.in/yaml.v3"
)
// SpecFile command to generate a swagger spec from a go application
type SpecFile struct {
- BasePath string `long:"base-path" short:"b" description:"the base path to use" default:"."`
- BuildTags string `long:"tags" short:"t" description:"build tags" default:""`
- ScanModels bool `long:"scan-models" short:"m" description:"includes models that were annotated with 'swagger:model'"`
- Compact bool `long:"compact" description:"when present, doesn't prettify the json"`
- Output flags.Filename `long:"output" short:"o" description:"the file to write to"`
- Input flags.Filename `long:"input" short:"i" description:"an input swagger file with which to merge"`
- Include []string `long:"include" short:"c" description:"include packages matching pattern"`
- Exclude []string `long:"exclude" short:"x" description:"exclude packages matching pattern"`
- IncludeTags []string `long:"include-tag" short:"" description:"include routes having specified tags (can be specified many times)"`
- ExcludeTags []string `long:"exclude-tag" short:"" description:"exclude routes having specified tags (can be specified many times)"`
+ WorkDir string `long:"work-dir" short:"w" description:"the base path to use" default:"."`
+ BuildTags string `long:"tags" short:"t" description:"build tags" default:""`
+ ScanModels bool `long:"scan-models" short:"m" description:"includes models that were annotated with 'swagger:model'"`
+ Compact bool `long:"compact" description:"when present, doesn't prettify the json"`
+ Output flags.Filename `long:"output" short:"o" description:"the file to write to"`
+ Input flags.Filename `long:"input" short:"i" description:"an input swagger file with which to merge"`
+ Include []string `long:"include" short:"c" description:"include packages matching pattern"`
+ Exclude []string `long:"exclude" short:"x" description:"exclude packages matching pattern"`
+ IncludeTags []string `long:"include-tag" short:"" description:"include routes having specified tags (can be specified many times)"`
+ ExcludeTags []string `long:"exclude-tag" short:"" description:"exclude routes having specified tags (can be specified many times)"`
+ ExcludeDeps bool `long:"exclude-deps" short:"" description:"exclude all dependencies of project"`
+ SetXNullableForPointers bool `long:"nullable-pointers" short:"n" description:"set x-nullable extension to true automatically for fields of pointer types without 'omitempty'"`
}
// Execute runs this command
func (s *SpecFile) Execute(args []string) error {
+ if len(args) == 0 { // by default consider all the paths under the working directory
+ args = []string{"./..."}
+ }
+
input, err := loadSpec(string(s.Input))
if err != nil {
return err
}
- var opts scan.Opts
- opts.BasePath = s.BasePath
- opts.Input = input
+ var opts codescan.Options
+ opts.Packages = args
+ opts.WorkDir = s.WorkDir
+ opts.InputSpec = input
opts.ScanModels = s.ScanModels
opts.BuildTags = s.BuildTags
opts.Include = s.Include
opts.Exclude = s.Exclude
opts.IncludeTags = s.IncludeTags
opts.ExcludeTags = s.ExcludeTags
- swspec, err := scan.Application(opts)
+ opts.ExcludeDeps = s.ExcludeDeps
+ opts.SetXNullableForPointers = s.SetXNullableForPointers
+ swspec, err := codescan.Run(&opts)
if err != nil {
return err
}
@@ -100,7 +107,7 @@ func writeToFile(swspec *spec.Swagger, pretty bool, output string) error {
fmt.Println(string(b))
return nil
}
- return os.WriteFile(output, b, 0644)
+ return os.WriteFile(output, b, 0644) // #nosec
}
func marshalToJSONFormat(swspec *spec.Swagger, pretty bool) ([]byte, error) {
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/generate/spec_go111.go b/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/generate/spec_go111.go
deleted file mode 100644
index bf2295864f8..00000000000
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/generate/spec_go111.go
+++ /dev/null
@@ -1,119 +0,0 @@
-//go:build go1.11
-// +build go1.11
-
-package generate
-
-import (
- "encoding/json"
- "fmt"
- "os"
- "strings"
-
- "github.com/go-swagger/go-swagger/codescan"
-
- "github.com/go-openapi/loads"
- "github.com/go-openapi/spec"
- "github.com/jessevdk/go-flags"
- "gopkg.in/yaml.v3"
-)
-
-// SpecFile command to generate a swagger spec from a go application
-type SpecFile struct {
- WorkDir string `long:"work-dir" short:"w" description:"the base path to use" default:"."`
- BuildTags string `long:"tags" short:"t" description:"build tags" default:""`
- ScanModels bool `long:"scan-models" short:"m" description:"includes models that were annotated with 'swagger:model'"`
- Compact bool `long:"compact" description:"when present, doesn't prettify the json"`
- Output flags.Filename `long:"output" short:"o" description:"the file to write to"`
- Input flags.Filename `long:"input" short:"i" description:"an input swagger file with which to merge"`
- Include []string `long:"include" short:"c" description:"include packages matching pattern"`
- Exclude []string `long:"exclude" short:"x" description:"exclude packages matching pattern"`
- IncludeTags []string `long:"include-tag" short:"" description:"include routes having specified tags (can be specified many times)"`
- ExcludeTags []string `long:"exclude-tag" short:"" description:"exclude routes having specified tags (can be specified many times)"`
- ExcludeDeps bool `long:"exclude-deps" short:"" description:"exclude all dependencies of project"`
-}
-
-// Execute runs this command
-func (s *SpecFile) Execute(args []string) error {
- if len(args) == 0 { // by default consider all the paths under the working directory
- args = []string{"./..."}
- }
-
- input, err := loadSpec(string(s.Input))
- if err != nil {
- return err
- }
-
- var opts codescan.Options
- opts.Packages = args
- opts.WorkDir = s.WorkDir
- opts.InputSpec = input
- opts.ScanModels = s.ScanModels
- opts.BuildTags = s.BuildTags
- opts.Include = s.Include
- opts.Exclude = s.Exclude
- opts.IncludeTags = s.IncludeTags
- opts.ExcludeTags = s.ExcludeTags
- opts.ExcludeDeps = s.ExcludeDeps
- swspec, err := codescan.Run(&opts)
- if err != nil {
- return err
- }
-
- return writeToFile(swspec, !s.Compact, string(s.Output))
-}
-
-func loadSpec(input string) (*spec.Swagger, error) {
- if fi, err := os.Stat(input); err == nil {
- if fi.IsDir() {
- return nil, fmt.Errorf("expected %q to be a file not a directory", input)
- }
- sp, err := loads.Spec(input)
- if err != nil {
- return nil, err
- }
- return sp.Spec(), nil
- }
- return nil, nil
-}
-
-func writeToFile(swspec *spec.Swagger, pretty bool, output string) error {
- var b []byte
- var err error
-
- if strings.HasSuffix(output, "yml") || strings.HasSuffix(output, "yaml") {
- b, err = marshalToYAMLFormat(swspec)
- } else {
- b, err = marshalToJSONFormat(swspec, pretty)
- }
-
- if err != nil {
- return err
- }
-
- if output == "" {
- fmt.Println(string(b))
- return nil
- }
- return os.WriteFile(output, b, 0644) // #nosec
-}
-
-func marshalToJSONFormat(swspec *spec.Swagger, pretty bool) ([]byte, error) {
- if pretty {
- return json.MarshalIndent(swspec, "", " ")
- }
- return json.Marshal(swspec)
-}
-
-func marshalToYAMLFormat(swspec *spec.Swagger) ([]byte, error) {
- b, err := json.Marshal(swspec)
- if err != nil {
- return nil, err
- }
-
- var jsonObj interface{}
- if err := yaml.Unmarshal(b, &jsonObj); err != nil {
- return nil, err
- }
-
- return yaml.Marshal(jsonObj)
-}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/serve.go b/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/serve.go
index aeea4ceddde..63705f618f0 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/serve.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/serve.go
@@ -23,7 +23,7 @@ type ServeCmd struct {
BasePath string `long:"base-path" description:"the base path to serve the spec and UI at"`
Flavor string `short:"F" long:"flavor" description:"the flavor of docs, can be swagger or redoc" default:"redoc" choice:"redoc" choice:"swagger"`
DocURL string `long:"doc-url" description:"override the url which takes a url query param to render the doc ui"`
- NoOpen bool `long:"no-open" description:"when present won't open the the browser to show the url"`
+ NoOpen bool `long:"no-open" description:"when present won't open the browser to show the url"`
NoUI bool `long:"no-ui" description:"when present, only the swagger spec will be served"`
Flatten bool `long:"flatten" description:"when present, flatten the swagger spec before serving it"`
Port int `long:"port" short:"p" description:"the port to serve this site" env:"PORT"`
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/application.go b/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/application.go
index 952d9fb1f88..ebaa6261a30 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/application.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/application.go
@@ -42,16 +42,17 @@ const (
// Options for the scanner
type Options struct {
- Packages []string
- InputSpec *spec.Swagger
- ScanModels bool
- WorkDir string
- BuildTags string
- ExcludeDeps bool
- Include []string
- Exclude []string
- IncludeTags []string
- ExcludeTags []string
+ Packages []string
+ InputSpec *spec.Swagger
+ ScanModels bool
+ WorkDir string
+ BuildTags string
+ ExcludeDeps bool
+ Include []string
+ Exclude []string
+ IncludeTags []string
+ ExcludeTags []string
+ SetXNullableForPointers bool
}
type scanCtx struct {
@@ -94,7 +95,7 @@ func newScanCtx(opts *Options) (*scanCtx, error) {
app, err := newTypeIndex(pkgs, opts.ExcludeDeps,
sliceToSet(opts.IncludeTags), sliceToSet(opts.ExcludeTags),
- opts.Include, opts.Exclude)
+ opts.Include, opts.Exclude, opts.SetXNullableForPointers)
if err != nil {
return nil, err
}
@@ -163,7 +164,7 @@ DECLS:
return
}
-func (d *entityDecl) OperationIDS() (result []string) {
+func (d *entityDecl) OperationIDs() (result []string) {
if d == nil || d.Comments == nil {
return nil
}
@@ -281,7 +282,6 @@ func (s *scanCtx) FindDecl(pkgPath, name string) (*entityDecl, bool) {
}
return decl, true
}
-
}
}
}
@@ -399,7 +399,7 @@ func (s *scanCtx) FindEnumValues(pkg *packages.Package, enumName string) (list [
}
for i, doc := range vs.Doc.List {
if doc.Text != "" {
- var text = strings.TrimPrefix(doc.Text, "//")
+ text := strings.TrimPrefix(doc.Text, "//")
desc.WriteString(text)
if i < docListLen-1 {
desc.WriteString(" ")
@@ -419,19 +419,17 @@ func (s *scanCtx) FindEnumValues(pkg *packages.Package, enumName string) (list [
return list, descList, true
}
-func newTypeIndex(pkgs []*packages.Package,
- excludeDeps bool, includeTags, excludeTags map[string]bool,
- includePkgs, excludePkgs []string) (*typeIndex, error) {
-
+func newTypeIndex(pkgs []*packages.Package, excludeDeps bool, includeTags, excludeTags map[string]bool, includePkgs, excludePkgs []string, setXNullableForPointers bool) (*typeIndex, error) {
ac := &typeIndex{
- AllPackages: make(map[string]*packages.Package),
- Models: make(map[*ast.Ident]*entityDecl),
- ExtraModels: make(map[*ast.Ident]*entityDecl),
- excludeDeps: excludeDeps,
- includeTags: includeTags,
- excludeTags: excludeTags,
- includePkgs: includePkgs,
- excludePkgs: excludePkgs,
+ AllPackages: make(map[string]*packages.Package),
+ Models: make(map[*ast.Ident]*entityDecl),
+ ExtraModels: make(map[*ast.Ident]*entityDecl),
+ excludeDeps: excludeDeps,
+ includeTags: includeTags,
+ excludeTags: excludeTags,
+ includePkgs: includePkgs,
+ excludePkgs: excludePkgs,
+ setXNullableForPointers: setXNullableForPointers,
}
if err := ac.build(pkgs); err != nil {
return nil, err
@@ -440,19 +438,20 @@ func newTypeIndex(pkgs []*packages.Package,
}
type typeIndex struct {
- AllPackages map[string]*packages.Package
- Models map[*ast.Ident]*entityDecl
- ExtraModels map[*ast.Ident]*entityDecl
- Meta []metaSection
- Routes []parsedPathContent
- Operations []parsedPathContent
- Parameters []*entityDecl
- Responses []*entityDecl
- excludeDeps bool
- includeTags map[string]bool
- excludeTags map[string]bool
- includePkgs []string
- excludePkgs []string
+ AllPackages map[string]*packages.Package
+ Models map[*ast.Ident]*entityDecl
+ ExtraModels map[*ast.Ident]*entityDecl
+ Meta []metaSection
+ Routes []parsedPathContent
+ Operations []parsedPathContent
+ Parameters []*entityDecl
+ Responses []*entityDecl
+ excludeDeps bool
+ includeTags map[string]bool
+ excludeTags map[string]bool
+ includePkgs []string
+ excludePkgs []string
+ setXNullableForPointers bool
}
func (a *typeIndex) build(pkgs []*packages.Package) error {
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/operations.go b/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/operations.go
index c6a194526ba..b5caedc2f27 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/operations.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/operations.go
@@ -29,10 +29,10 @@ func (o *operationsBuilder) Build(tgt *spec.Paths) error {
sp.setDescription = func(lines []string) { op.Description = joinDropLast(lines) }
if err := sp.Parse(o.path.Remaining); err != nil {
- return fmt.Errorf("operation (%s): %v", op.ID, err)
+ return fmt.Errorf("operation (%s): %w", op.ID, err)
}
if err := sp.UnmarshalSpec(op.UnmarshalJSON); err != nil {
- return fmt.Errorf("operation (%s): %v", op.ID, err)
+ return fmt.Errorf("operation (%s): %w", op.ID, err)
}
if tgt.Paths == nil {
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/parameters.go b/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/parameters.go
index b00916825fb..1ee769ae82d 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/parameters.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/parameters.go
@@ -8,8 +8,6 @@ import (
"golang.org/x/tools/go/ast/astutil"
- "github.com/pkg/errors"
-
"github.com/go-openapi/spec"
)
@@ -117,6 +115,7 @@ func (sv paramValidations) SetMaximum(val float64, exclusive bool) {
sv.current.Maximum = &val
sv.current.ExclusiveMaximum = exclusive
}
+
func (sv paramValidations) SetMinimum(val float64, exclusive bool) {
sv.current.Minimum = &val
sv.current.ExclusiveMinimum = exclusive
@@ -143,6 +142,7 @@ func (sv itemsValidations) SetMaximum(val float64, exclusive bool) {
sv.current.Maximum = &val
sv.current.ExclusiveMaximum = exclusive
}
+
func (sv itemsValidations) SetMinimum(val float64, exclusive bool) {
sv.current.Minimum = &val
sv.current.ExclusiveMinimum = exclusive
@@ -168,12 +168,11 @@ type parameterBuilder struct {
}
func (p *parameterBuilder) Build(operations map[string]*spec.Operation) error {
-
// check if there is a swagger:parameters tag that is followed by one or more words,
// these words are the ids of the operations this parameter struct applies to
// once type name is found convert it to a schema, by looking up the schema in the
// parameters dictionary that got passed into this parse method
- for _, opid := range p.decl.OperationIDS() {
+ for _, opid := range p.decl.OperationIDs() {
operation, ok := operations[opid]
if !ok {
operation = new(spec.Operation)
@@ -210,10 +209,10 @@ func (p *parameterBuilder) buildFromType(otpe types.Type, op *spec.Operation, se
}
return p.buildFromStruct(p.decl, stpe, op, seen)
default:
- return errors.Errorf("unhandled type (%T): %s", stpe, o.Type().Underlying().String())
+ return fmt.Errorf("unhandled type (%T): %s", stpe, o.Type().Underlying().String())
}
default:
- return errors.Errorf("unhandled type (%T): %s", otpe, tpe.String())
+ return fmt.Errorf("unhandled type (%T): %s", otpe, tpe.String())
}
}
@@ -279,9 +278,9 @@ func (p *parameterBuilder) buildFromField(fld *types.Var, tpe types.Type, typabl
p.postDecls = append(p.postDecls, sb.postDecls...)
return nil
}
- return errors.Errorf("unable to find package and source file for: %s", ftpe.String())
+ return fmt.Errorf("unable to find package and source file for: %s", ftpe.String())
default:
- return errors.Errorf("unknown type for %s: %T", fld.String(), fld.Type())
+ return fmt.Errorf("unknown type for %s: %T", fld.String(), fld.Type())
}
}
@@ -340,7 +339,7 @@ func (p *parameterBuilder) buildFromStruct(decl *entityDecl, tpe *types.Struct,
continue
}
- name, ignore, _, err := parseJSONTag(afld)
+ name, ignore, _, _, err := parseJSONTag(afld)
if err != nil {
return err
}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/parser.go b/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/parser.go
index 9637e6c224a..3733d50df17 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/parser.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/parser.go
@@ -2,8 +2,10 @@ package codescan
import (
"encoding/json"
+ "errors"
"fmt"
"go/ast"
+ "log"
"reflect"
"regexp"
"strconv"
@@ -11,7 +13,6 @@ import (
"github.com/go-openapi/loads/fmts"
"github.com/go-openapi/spec"
- "github.com/pkg/errors"
"gopkg.in/yaml.v3"
)
@@ -1466,7 +1467,7 @@ func (ss *setOpResponses) Parse(lines []string) error {
return nil
}
-func parseEnum(val string, s *spec.SimpleSchema) []interface{} {
+func parseEnumOld(val string, s *spec.SimpleSchema) []interface{} {
list := strings.Split(val, ",")
interfaceSlice := make([]interface{}, len(list))
for i, d := range list {
@@ -1481,6 +1482,35 @@ func parseEnum(val string, s *spec.SimpleSchema) []interface{} {
return interfaceSlice
}
+func parseEnum(val string, s *spec.SimpleSchema) []interface{} {
+ // obtain the raw elements of the list to later process them with the parseValueFromSchema
+ var rawElements []json.RawMessage
+ if err := json.Unmarshal([]byte(val), &rawElements); err != nil {
+ log.Print("WARNING: item list for enum is not a valid JSON array, using the old deprecated format")
+ return parseEnumOld(val, s)
+ }
+
+ interfaceSlice := make([]interface{}, len(rawElements))
+
+ for i, d := range rawElements {
+
+ ds, err := strconv.Unquote(string(d))
+ if err != nil {
+ ds = string(d)
+ }
+
+ v, err := parseValueFromSchema(ds, s)
+ if err != nil {
+ interfaceSlice[i] = ds
+ continue
+ }
+
+ interfaceSlice[i] = v
+ }
+
+ return interfaceSlice
+}
+
// AlphaChars used when parsing for Vendor Extensions
const AlphaChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/parser_helpers.go b/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/parser_helpers.go
index 6ffac76afd2..a9f8be8ceec 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/parser_helpers.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/parser_helpers.go
@@ -1,6 +1,3 @@
-//go:build go1.19
-// +build go1.19
-
package codescan
import (
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/parser_helpers_go118.go b/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/parser_helpers_go118.go
deleted file mode 100644
index 62eb59a9693..00000000000
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/parser_helpers_go118.go
+++ /dev/null
@@ -1,42 +0,0 @@
-//go:build !go1.19
-// +build !go1.19
-
-package codescan
-
-import "strings"
-
-// a shared function that can be used to split given headers
-// into a title and description
-func collectScannerTitleDescription(headers []string) (title, desc []string) {
- hdrs := cleanupScannerLines(headers, rxUncommentHeaders, nil)
-
- idx := -1
- for i, line := range hdrs {
- if strings.TrimSpace(line) == "" {
- idx = i
- break
- }
- }
-
- if idx > -1 {
- title = hdrs[:idx]
- if len(hdrs) > idx+1 {
- desc = hdrs[idx+1:]
- } else {
- desc = nil
- }
- return
- }
-
- if len(hdrs) > 0 {
- line := hdrs[0]
- if rxPunctuationEnd.MatchString(line) {
- title = []string{line}
- desc = hdrs[1:]
- } else {
- desc = hdrs
- }
- }
-
- return
-}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/responses.go b/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/responses.go
index 350cd3a7bf3..39274baf0f6 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/responses.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/responses.go
@@ -1,16 +1,14 @@
package codescan
import (
+ "errors"
"fmt"
"go/ast"
"go/types"
"strings"
- "github.com/pkg/errors"
-
- "golang.org/x/tools/go/ast/astutil"
-
"github.com/go-openapi/spec"
+ "golang.org/x/tools/go/ast/astutil"
)
type responseTypable struct {
@@ -97,22 +95,50 @@ func (sv headerValidations) SetMaximum(val float64, exclusive bool) {
sv.current.Maximum = &val
sv.current.ExclusiveMaximum = exclusive
}
+
func (sv headerValidations) SetMinimum(val float64, exclusive bool) {
sv.current.Minimum = &val
sv.current.ExclusiveMinimum = exclusive
}
-func (sv headerValidations) SetMultipleOf(val float64) { sv.current.MultipleOf = &val }
-func (sv headerValidations) SetMinItems(val int64) { sv.current.MinItems = &val }
-func (sv headerValidations) SetMaxItems(val int64) { sv.current.MaxItems = &val }
-func (sv headerValidations) SetMinLength(val int64) { sv.current.MinLength = &val }
-func (sv headerValidations) SetMaxLength(val int64) { sv.current.MaxLength = &val }
-func (sv headerValidations) SetPattern(val string) { sv.current.Pattern = val }
-func (sv headerValidations) SetUnique(val bool) { sv.current.UniqueItems = val }
-func (sv headerValidations) SetCollectionFormat(val string) { sv.current.CollectionFormat = val }
+
+func (sv headerValidations) SetMultipleOf(val float64) {
+ sv.current.MultipleOf = &val
+}
+
+func (sv headerValidations) SetMinItems(val int64) {
+ sv.current.MinItems = &val
+}
+
+func (sv headerValidations) SetMaxItems(val int64) {
+ sv.current.MaxItems = &val
+}
+
+func (sv headerValidations) SetMinLength(val int64) {
+ sv.current.MinLength = &val
+}
+
+func (sv headerValidations) SetMaxLength(val int64) {
+ sv.current.MaxLength = &val
+}
+
+func (sv headerValidations) SetPattern(val string) {
+ sv.current.Pattern = val
+}
+
+func (sv headerValidations) SetUnique(val bool) {
+ sv.current.UniqueItems = val
+}
+
+func (sv headerValidations) SetCollectionFormat(val string) {
+ sv.current.CollectionFormat = val
+}
+
func (sv headerValidations) SetEnum(val string) {
sv.current.Enum = parseEnum(val, &spec.SimpleSchema{Type: sv.current.Type, Format: sv.current.Format})
}
+
func (sv headerValidations) SetDefault(val interface{}) { sv.current.Default = val }
+
func (sv headerValidations) SetExample(val interface{}) { sv.current.Example = val }
type responseBuilder struct {
@@ -215,9 +241,9 @@ func (r *responseBuilder) buildFromField(fld *types.Var, tpe types.Type, typable
r.postDecls = append(r.postDecls, sb.postDecls...)
return nil
}
- return errors.Errorf("unable to find package and source file for: %s", ftpe.String())
+ return fmt.Errorf("unable to find package and source file for: %s", ftpe.String())
default:
- return errors.Errorf("unknown type for %s: %T", fld.String(), fld.Type())
+ return fmt.Errorf("unknown type for %s: %T", fld.String(), fld.Type())
}
}
@@ -256,7 +282,7 @@ func (r *responseBuilder) buildFromType(otpe types.Type, resp *spec.Response, se
r.postDecls = append(r.postDecls, sb.postDecls...)
return nil
}
- return errors.Errorf("responses can only be structs, did you mean for %s to be the response body?", otpe.String())
+ return fmt.Errorf("responses can only be structs, did you mean for %s to be the response body?", otpe.String())
}
default:
return errors.New("anonymous types are currently not supported for responses")
@@ -307,7 +333,7 @@ func (r *responseBuilder) buildFromStruct(decl *entityDecl, tpe *types.Struct, r
continue
}
- name, ignore, _, err := parseJSONTag(afld)
+ name, ignore, _, _, err := parseJSONTag(afld)
if err != nil {
return err
}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/routes.go b/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/routes.go
index af58e43f38f..20cbf2c7b8f 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/routes.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/routes.go
@@ -58,7 +58,6 @@ type routesBuilder struct {
}
func (r *routesBuilder) Build(tgt *spec.Paths) error {
-
pthObj := tgt.Paths[r.route.Path]
op := setPathOperation(
r.route.Method, r.route.ID,
@@ -82,7 +81,7 @@ func (r *routesBuilder) Build(tgt *spec.Paths) error {
newMultiLineTagParser("Extensions", newSetExtensions(opExtensionsSetter(op)), true),
}
if err := sp.Parse(r.route.Remaining); err != nil {
- return fmt.Errorf("operation (%s): %v", op.ID, err)
+ return fmt.Errorf("operation (%s): %w", op.ID, err)
}
if tgt.Paths == nil {
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/schema.go b/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/schema.go
index 8c6723040a2..b06909128ea 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/schema.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/schema.go
@@ -2,6 +2,7 @@ package codescan
import (
"encoding/json"
+ "errors"
"fmt"
"go/ast"
"go/importer"
@@ -15,7 +16,6 @@ import (
"golang.org/x/tools/go/ast/astutil"
"github.com/go-openapi/spec"
- "github.com/pkg/errors"
)
func addExtension(ve *spec.VendorExtensible, key string, value interface{}) {
@@ -92,6 +92,7 @@ func (sv schemaValidations) SetMaximum(val float64, exclusive bool) {
sv.current.Maximum = &val
sv.current.ExclusiveMaximum = exclusive
}
+
func (sv schemaValidations) SetMinimum(val float64, exclusive bool) {
sv.current.Minimum = &val
sv.current.ExclusiveMinimum = exclusive
@@ -358,9 +359,16 @@ func (s *schemaBuilder) buildFromType(tpe types.Type, tgt swaggerTypable) error
return nil
}
+ if s.decl.Spec.Assign.IsValid() {
+ return s.buildFromType(titpe.Underlying(), tgt)
+ }
+
+ if titpe.TypeArgs() != nil && titpe.TypeArgs().Len() > 0 {
+ return s.buildFromType(titpe.Underlying(), tgt)
+ }
+
switch utitpe := tpe.Underlying().(type) {
case *types.Struct:
-
if decl, ok := s.ctx.FindModel(tio.Pkg().Path(), tio.Name()); ok {
if decl.Type.Obj().Pkg().Path() == "time" && decl.Type.Obj().Name() == "Time" {
tgt.Typed("string", "date-time")
@@ -401,7 +409,7 @@ func (s *schemaBuilder) buildFromType(tpe types.Type, tgt swaggerTypable) error
}
if defaultName, ok := defaultName(cmt); ok {
- debugLog(defaultName)
+ debugLog(defaultName) //nolint:govet
return nil
}
@@ -645,6 +653,12 @@ func (s *schemaBuilder) buildFromInterface(decl *entityDecl, it *types.Interface
ps.AddExtension("x-go-name", fld.Name())
}
+ if s.ctx.app.setXNullableForPointers {
+ if _, isPointer := fld.Type().(*types.Signature).Results().At(0).Type().(*types.Pointer); isPointer && (ps.Extensions == nil || (ps.Extensions["x-nullable"] == nil && ps.Extensions["x-isnullable"] == nil)) {
+ ps.AddExtension("x-nullable", true)
+ }
+ }
+
seen[name] = fld.Name()
tgt.Properties[name] = ps
}
@@ -710,7 +724,7 @@ func (s *schemaBuilder) buildFromStruct(decl *entityDecl, st *types.Struct, sche
continue
}
- _, ignore, _, err := parseJSONTag(afld)
+ _, ignore, _, _, err := parseJSONTag(afld)
if err != nil {
return err
}
@@ -810,7 +824,7 @@ func (s *schemaBuilder) buildFromStruct(decl *entityDecl, st *types.Struct, sche
continue
}
- name, ignore, isString, err := parseJSONTag(afld)
+ name, ignore, isString, omitEmpty, err := parseJSONTag(afld)
if err != nil {
return err
}
@@ -847,6 +861,13 @@ func (s *schemaBuilder) buildFromStruct(decl *entityDecl, st *types.Struct, sche
addExtension(&ps.VendorExtensible, "x-go-name", fld.Name())
}
+ if s.ctx.app.setXNullableForPointers {
+ if _, isPointer := fld.Type().(*types.Pointer); isPointer && !omitEmpty &&
+ (ps.Extensions == nil || (ps.Extensions["x-nullable"] == nil && ps.Extensions["x-isnullable"] == nil)) {
+ ps.AddExtension("x-nullable", true)
+ }
+ }
+
// we have 2 cases:
// 1. field with different name override tag
// 2. field with different name removes tag
@@ -892,7 +913,7 @@ func (s *schemaBuilder) buildAllOf(tpe types.Type, schema *spec.Schema) error {
}
return s.buildFromStruct(decl, utpe, schema, make(map[string]string))
}
- return errors.Errorf("can't find source file for struct: %s", ftpe.String())
+ return fmt.Errorf("can't find source file for struct: %s", ftpe.String())
case *types.Interface:
decl, found := s.ctx.FindModel(ftpe.Obj().Pkg().Path(), ftpe.Obj().Name())
if found {
@@ -905,7 +926,7 @@ func (s *schemaBuilder) buildAllOf(tpe types.Type, schema *spec.Schema) error {
}
return s.buildFromInterface(decl, utpe, schema, make(map[string]string))
}
- return errors.Errorf("can't find source file for interface: %s", ftpe.String())
+ return fmt.Errorf("can't find source file for interface: %s", ftpe.String())
default:
log.Printf("WARNING: can't figure out object type for allOf named type (%T): %v", ftpe, ftpe.Underlying())
return fmt.Errorf("unable to locate source file for allOf %s", utpe.String())
@@ -929,13 +950,13 @@ func (s *schemaBuilder) buildEmbedded(tpe types.Type, schema *spec.Schema, seen
if found {
return s.buildFromStruct(decl, utpe, schema, seen)
}
- return errors.Errorf("can't find source file for struct: %s", ftpe.String())
+ return fmt.Errorf("can't find source file for struct: %s", ftpe.String())
case *types.Interface:
decl, found := s.ctx.FindModel(ftpe.Obj().Pkg().Path(), ftpe.Obj().Name())
if found {
return s.buildFromInterface(decl, utpe, schema, seen)
}
- return errors.Errorf("can't find source file for struct: %s", ftpe.String())
+ return fmt.Errorf("can't find source file for struct: %s", ftpe.String())
default:
log.Printf("WARNING: can't figure out object type for embedded named type (%T): %v", ftpe, ftpe.Underlying())
}
@@ -1100,17 +1121,17 @@ func (t tagOptions) Name() string {
return t[0]
}
-func parseJSONTag(field *ast.Field) (name string, ignore bool, isString bool, err error) {
+func parseJSONTag(field *ast.Field) (name string, ignore, isString, omitEmpty bool, err error) {
if len(field.Names) > 0 {
name = field.Names[0].Name
}
if field.Tag == nil || len(strings.TrimSpace(field.Tag.Value)) == 0 {
- return name, false, false, nil
+ return name, false, false, false, nil
}
tv, err := strconv.Unquote(field.Tag.Value)
if err != nil {
- return name, false, false, err
+ return name, false, false, false, err
}
if strings.TrimSpace(tv) != "" {
@@ -1123,16 +1144,18 @@ func parseJSONTag(field *ast.Field) (name string, ignore bool, isString bool, er
isString = isFieldStringable(field.Type)
}
+ omitEmpty = jsonParts.Contain("omitempty")
+
switch jsonParts.Name() {
case "-":
- return name, true, isString, nil
+ return name, true, isString, omitEmpty, nil
case "":
- return name, false, isString, nil
+ return name, false, isString, omitEmpty, nil
default:
- return jsonParts.Name(), false, isString, nil
+ return jsonParts.Name(), false, isString, omitEmpty, nil
}
}
- return name, false, false, nil
+ return name, false, false, false, nil
}
// isFieldStringable check if the field type is a scalar. If the field type is
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/spec.go b/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/spec.go
index 726787c11bf..20c4e102222 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/spec.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/codescan/spec.go
@@ -54,7 +54,7 @@ func (s *specBuilder) Build() (*spec.Swagger, error) {
return nil, err
}
- if err := s.buildRespones(); err != nil {
+ if err := s.buildResponses(); err != nil {
return nil, err
}
@@ -160,7 +160,7 @@ func (s *specBuilder) buildRoutes() error {
return nil
}
-func (s *specBuilder) buildRespones() error {
+func (s *specBuilder) buildResponses() error {
// build responses dictionary
for _, decl := range s.ctx.app.Responses {
rb := &responseBuilder{
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/bindata.go b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/bindata.go
index 37936273497..d6779ba61b1 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/bindata.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/bindata.go
@@ -11,7 +11,7 @@ var _bindata embed.FS
// AssetNames returns the names of the assets.
func AssetNames() []string {
names := make([]string, 0)
- _ = fs.WalkDir(_bindata, "templates", func(path string, d fs.DirEntry, err error) error {
+ _ = fs.WalkDir(_bindata, "templates", func(path string, _ fs.DirEntry, err error) error {
if err != nil {
return err
}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/config.go b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/config.go
index 2d9413218d4..a565cb20307 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/config.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/config.go
@@ -1,6 +1,7 @@
package generator
import (
+ "errors"
"fmt"
"os"
"path/filepath"
@@ -53,7 +54,8 @@ func ReadConfig(fpath string) (*viper.Viper, error) {
v.SetConfigName(".swagger")
v.AddConfigPath(".")
if err := v.ReadInConfig(); err != nil {
- if _, ok := err.(viper.UnsupportedConfigError); !ok && v.ConfigFileUsed() != "" {
+ var e viper.UnsupportedConfigError
+ if !errors.As(err, &e) && v.ConfigFileUsed() != "" {
return nil, err
}
}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/formats.go b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/formats.go
index 3d127333f47..121679aceff 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/formats.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/formats.go
@@ -57,6 +57,7 @@ var zeroes = map[string]string{
"strfmt.UUID3": "strfmt.UUID3(\"\")",
"strfmt.UUID4": "strfmt.UUID4(\"\")",
"strfmt.UUID5": "strfmt.UUID5(\"\")",
+ "strfmt.ULID": "strfmt.ULID(\"\")",
// "file": "runtime.File",
}
@@ -165,6 +166,7 @@ var formatMapping = map[string]map[string]string{
"uuid3": "strfmt.UUID3",
"uuid4": "strfmt.UUID4",
"uuid5": "strfmt.UUID5",
+ "ulid": "strfmt.ULID",
// For file producers
"file": "runtime.File",
},
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/genopts_nonwin.go b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/genopts_nonwin.go
index 7e2a4f1c008..fd1d0aaa1a0 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/genopts_nonwin.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/genopts_nonwin.go
@@ -34,13 +34,11 @@ func (t *Repository) LoadPlugin(pluginPath string) error {
log.Printf("Attempting to load template plugin: %s", pluginPath)
p, err := plugin.Open(pluginPath)
-
if err != nil {
return err
}
f, err := p.Lookup("AddFuncs")
-
if err != nil {
return err
}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/language.go b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/language.go
index 01c7a318eee..68e9116632c 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/language.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/language.go
@@ -141,7 +141,7 @@ func (l *LanguageOpts) baseImport(tgt string) string {
// GoLangOpts for rendering items as golang code
func GoLangOpts() *LanguageOpts {
- var goOtherReservedSuffixes = map[string]bool{
+ goOtherReservedSuffixes := map[string]bool{
// see:
// https://golang.org/src/go/build/syslist.go
// https://golang.org/doc/install/source#environment
@@ -154,6 +154,7 @@ func GoLangOpts() *LanguageOpts {
"freebsd": true,
"hurd": true,
"illumos": true,
+ "ios": true,
"js": true,
"linux": true,
"nacl": true,
@@ -172,6 +173,7 @@ func GoLangOpts() *LanguageOpts {
"armbe": true,
"arm64": true,
"arm64be": true,
+ "loong64": true,
"mips": true,
"mipsle": true,
"mips64": true,
@@ -436,5 +438,4 @@ func checkPrefixAndFetchRelativePath(childpath string, parentpath string) (bool,
}
return false, ""
-
}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/media.go b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/media.go
index f9dad9fa4b2..239926dc86c 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/media.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/media.go
@@ -71,6 +71,10 @@ func mediaMime(orig string) string {
return strings.SplitN(orig, ";", 2)[0]
}
+func mediaGoName(media string) string {
+ return pascalize(strings.ReplaceAll(media, "*", "Star"))
+}
+
func mediaParameters(orig string) string {
parts := strings.SplitN(orig, ";", 2)
if len(parts) < 2 {
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/model.go b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/model.go
index 132927d483d..ca5b87a09c0 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/model.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/model.go
@@ -120,10 +120,9 @@ type definitionGenerator struct {
}
func (m *definitionGenerator) Generate() error {
-
mod, err := makeGenDefinition(m.Name, m.Target, m.Model, m.SpecDoc, m.opts)
if err != nil {
- return fmt.Errorf("could not generate definitions for model %s on target %s: %v", m.Name, m.Target, err)
+ return fmt.Errorf("could not generate definitions for model %s on target %s: %w", m.Name, m.Target, err)
}
if m.opts.DumpData {
@@ -133,7 +132,7 @@ func (m *definitionGenerator) Generate() error {
if m.opts.IncludeModel {
log.Println("including additional model")
if err := m.generateModel(mod); err != nil {
- return fmt.Errorf("could not generate model: %v", err)
+ return fmt.Errorf("could not generate model: %w", err)
}
}
log.Println("generated model", m.Name)
@@ -255,9 +254,10 @@ func makeGenDefinitionHierarchy(name, pkg, container string, schema spec.Schema,
StrictAdditionalProperties: opts.StrictAdditionalProperties,
WithXML: opts.WithXML,
StructTags: opts.StructTags,
+ WantsRootedErrorPath: opts.WantsRootedErrorPath,
}
if err := pg.makeGenSchema(); err != nil {
- return nil, fmt.Errorf("could not generate schema for %s: %v", name, err)
+ return nil, fmt.Errorf("could not generate schema for %s: %w", name, err)
}
dsi, ok := di.Discriminators["#/definitions/"+name]
if ok {
@@ -358,6 +358,7 @@ func makeGenDefinitionHierarchy(name, pkg, container string, schema spec.Schema,
"runtime": "github.com/go-openapi/runtime",
"swag": "github.com/go-openapi/swag",
"validate": "github.com/go-openapi/validate",
+ "strfmt": "github.com/go-openapi/strfmt",
}
return &GenDefinition{
@@ -442,12 +443,12 @@ type schemaGenContext struct {
AdditionalProperty bool
Untyped bool
Named bool
- RefHandled bool
IsVirtual bool
IsTuple bool
IncludeValidator bool
IncludeModel bool
StrictAdditionalProperties bool
+ WantsRootedErrorPath bool
WithXML bool
Index int
@@ -473,6 +474,10 @@ type schemaGenContext struct {
// force to use container in inlined definitions (for deconflicting)
UseContainerInName bool
+ // indicates if the schema is part of a slice or a map
+ IsElem bool
+ // indicates if the schema is part of a struct
+ IsProperty bool
}
func (sg *schemaGenContext) NewSliceBranch(schema *spec.Schema) *schemaGenContext {
@@ -500,6 +505,7 @@ func (sg *schemaGenContext) NewSliceBranch(schema *spec.Schema) *schemaGenContex
pg.ValueExpr = pg.ValueExpr + "[" + indexVar + "]"
pg.Schema = *schema
pg.Required = false
+ pg.IsElem = true
if sg.IsVirtual {
pg.TypeResolver = sg.TypeResolver.NewWithModelName(sg.TypeResolver.ModelName)
}
@@ -566,6 +572,7 @@ func (sg *schemaGenContext) NewStructBranch(name string, schema spec.Schema) *sc
pg.Name = name
pg.ValueExpr = pg.ValueExpr + "." + pascalize(goName(&schema, name))
pg.Schema = schema
+ pg.IsProperty = true
for _, fn := range sg.Schema.Required {
if name == fn {
pg.Required = true
@@ -621,6 +628,7 @@ func (sg *schemaGenContext) NewAdditionalProperty(schema spec.Schema) *schemaGen
if sg.Path != "" {
pg.Path = sg.Path + "+\".\"+" + pg.KeyVar
}
+ pg.IsElem = true
// propagates the special IsNullable override for maps of slices and
// maps of aliased types.
pg.GenSchema.IsMapNullOverride = sg.GenSchema.IsMapNullOverride
@@ -680,7 +688,7 @@ func (sg *schemaGenContext) schemaValidations() sharedValidations {
// when readOnly or default is specified, this disables Required validation (Swagger-specific)
isRequired = false
if sg.Required {
- log.Printf("warn: properties with a default value or readOnly should not be required [%s]", sg.Name)
+ log.Printf("warning: properties with a default value or readOnly should not be required [%s]", sg.Name)
}
}
@@ -841,7 +849,7 @@ func (sg *schemaGenContext) buildProperties() error {
}
// set property name
- var nm = filepath.Base(emprop.Schema.Ref.GetURL().Fragment)
+ nm := filepath.Base(emprop.Schema.Ref.GetURL().Fragment)
tr := sg.TypeResolver.NewWithModelName(goName(&emprop.Schema, swag.ToGoName(nm)))
ttpe, err := tr.ResolveSchema(sch, false, true)
@@ -1228,7 +1236,13 @@ func (mt *mapStack) Dict() map[string]interface{} {
func (sg *schemaGenContext) buildAdditionalProperties() error {
if sg.Schema.AdditionalProperties == nil {
- return nil
+ if sg.Schema.MinProperties == nil && sg.Schema.MaxProperties == nil {
+ return nil
+ }
+
+ // whenever there is a validation on min/max properties and no additionalProperties is defined,
+ // we imply additionalProperties: true (corresponds to jsonschema defaults).
+ sg.Schema.AdditionalProperties = &spec.SchemaOrBool{Allows: true}
}
addp := *sg.Schema.AdditionalProperties
@@ -1256,7 +1270,9 @@ func (sg *schemaGenContext) buildAdditionalProperties() error {
sg.GenSchema.IsComplexObject = false
sg.GenSchema.IsMap = true
- sg.GenSchema.ValueExpression += "." + swag.ToGoName(sg.Name+" additionalProperties")
+ if !sg.IsElem && !sg.IsProperty {
+ sg.GenSchema.ValueExpression += "." + swag.ToGoName(sg.Name+" additionalProperties")
+ }
cp := sg.NewAdditionalProperty(*addp.Schema)
cp.Name += "AdditionalProperties"
cp.Required = false
@@ -1325,7 +1341,6 @@ func (sg *schemaGenContext) buildAdditionalProperties() error {
if err := comprop.makeGenSchema(); err != nil {
return err
}
-
sg.MergeResult(comprop, false)
sg.GenSchema.AdditionalProperties = &comprop.GenSchema
sg.GenSchema.AdditionalProperties.ValueExpression = sg.GenSchema.ValueExpression + "[" + comprop.KeyVar + "]"
@@ -1598,9 +1613,8 @@ func (sg *schemaGenContext) buildItems() error {
}
func (sg *schemaGenContext) buildAdditionalItems() error {
- wantsAdditionalItems :=
- sg.Schema.AdditionalItems != nil &&
- (sg.Schema.AdditionalItems.Allows || sg.Schema.AdditionalItems.Schema != nil)
+ wantsAdditionalItems := sg.Schema.AdditionalItems != nil &&
+ (sg.Schema.AdditionalItems.Allows || sg.Schema.AdditionalItems.Schema != nil)
sg.GenSchema.HasAdditionalItems = wantsAdditionalItems
if wantsAdditionalItems {
@@ -1672,8 +1686,7 @@ func (sg *schemaGenContext) shortCircuitNamedRef() (bool, error) {
// NOTE: this assumes that all $ref point to a definition,
// i.e. the spec is canonical, as guaranteed by minimal flattening.
//
- // TODO: RefHandled is actually set nowhere
- if sg.RefHandled || !sg.Named || sg.Schema.Ref.String() == "" {
+ if !sg.Named || sg.Schema.Ref.String() == "" {
return false, nil
}
debugLogAsJSON("short circuit named ref: %q", sg.Schema.Ref.String(), sg.Schema)
@@ -1684,6 +1697,8 @@ func (sg *schemaGenContext) shortCircuitNamedRef() (bool, error) {
// check if the $ref points to a simple type or polymorphic (base) type.
//
// If this is the case, just realias this simple type, without creating a struct.
+ //
+ // In templates this case is identified by .IsSuperAlias = true
asch, era := analysis.Schema(analysis.SchemaOpts{
Root: sg.TypeResolver.Doc.Spec(),
BasePath: sg.TypeResolver.Doc.SpecFilePath(),
@@ -1734,10 +1749,16 @@ func (sg *schemaGenContext) shortCircuitNamedRef() (bool, error) {
}
// Aliased object: use golang struct composition.
+ // Covers case of a type redefinition like:
+ // thistype:
+ // $ref: #/definitions/othertype
+ //
// This is rendered as a struct with type field, i.e. :
// Alias struct {
// AliasedType
// }
+ //
+ // In templates, the schema is composed like AllOf.
nullableOverride := sg.GenSchema.IsNullable
tpe := resolvedType{}
@@ -1750,17 +1771,26 @@ func (sg *schemaGenContext) shortCircuitNamedRef() (bool, error) {
tpe.IsAnonymous = false
tpe.IsNullable = sg.TypeResolver.isNullable(&sg.Schema)
- item := sg.NewCompositionBranch(sg.Schema, 0)
- if err := item.makeGenSchema(); err != nil {
+ branch := sg.NewCompositionBranch(sg.Schema, 0)
+ if err := branch.makeGenSchema(); err != nil {
return true, err
}
sg.GenSchema.resolvedType = tpe
sg.GenSchema.IsNullable = sg.GenSchema.IsNullable || nullableOverride
// prevent format from bubbling up in composed type
- item.GenSchema.IsCustomFormatter = false
+ branch.GenSchema.IsCustomFormatter = false
+
+ sg.MergeResult(branch, true)
+
+ tpx, ers := sg.TypeResolver.ResolveSchema(&sg.Schema, false, true)
+ if ers != nil {
+ return false, ers
+ }
+ // we don't know the actual validation status yet. So assume true,
+ // unless we can infer that no Validate() method will be present
+ branch.GenSchema.HasValidations = !tpx.IsInterface && !tpx.IsStream
+ sg.GenSchema.AllOf = append(sg.GenSchema.AllOf, branch.GenSchema)
- sg.MergeResult(item, true)
- sg.GenSchema.AllOf = append(sg.GenSchema.AllOf, item.GenSchema)
return true, nil
}
@@ -1967,6 +1997,9 @@ func (sg *schemaGenContext) makeGenSchema() error {
sg.GenSchema.Default = sg.Schema.Default
sg.GenSchema.StructTags = sg.StructTags
sg.GenSchema.ExtraImports = make(map[string]string)
+ sg.GenSchema.WantsRootedErrorPath = sg.WantsRootedErrorPath
+ sg.GenSchema.IsElem = sg.IsElem
+ sg.GenSchema.IsProperty = sg.IsProperty
var err error
returns, err := sg.shortCircuitNamedRef()
@@ -1974,6 +2007,7 @@ func (sg *schemaGenContext) makeGenSchema() error {
return err
}
if returns {
+ // short circuited on a resolved $ref
return nil
}
debugLogAsJSON("after short circuit named ref", sg.Schema)
@@ -2035,6 +2069,8 @@ func (sg *schemaGenContext) makeGenSchema() error {
log.Printf("INFO: type %s is external, with inferred spec type %s, referred to as %s", sg.GenSchema.Name, sg.GenSchema.GoType, extType)
sg.GenSchema.GoType = extType
sg.GenSchema.AliasedType = extType
+
+ // short circuit schema building for external types
return nil
}
// TODO: case for embedded types as anonymous definitions
@@ -2073,6 +2109,8 @@ func (sg *schemaGenContext) makeGenSchema() error {
sg.GenSchema.IsMap = prev.IsMap
sg.GenSchema.IsAdditionalProperties = prev.IsAdditionalProperties
sg.GenSchema.IsBaseType = sg.GenSchema.HasDiscriminator
+ sg.GenSchema.IsElem = prev.IsElem
+ sg.GenSchema.IsProperty = prev.IsProperty
debugLogAsJSON("gschema nnullable:IsNullable:%t,resolver.IsNullable:%t,nullableOverride:%t",
sg.GenSchema.IsNullable, otn, nullableOverride, sg.Schema)
@@ -2114,5 +2152,6 @@ func (sg *schemaGenContext) makeGenSchema() error {
(gs.IsTuple || gs.IsComplexObject || gs.IsAdditionalProperties || (gs.IsPrimitive && gs.IsAliased && gs.IsCustomFormatter && !strings.Contains(gs.Zero(), `("`)))
debugLog("finished gen schema for %q", sg.Name)
+
return nil
}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/operation.go b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/operation.go
index 8f4b8b2f6b7..a2098c37588 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/operation.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/operation.go
@@ -18,7 +18,9 @@ import (
"encoding/json"
"errors"
"fmt"
+ "log"
"path/filepath"
+ "regexp"
"sort"
"strings"
@@ -149,7 +151,6 @@ type operationGenerator struct {
// Generate a single operation
func (o *operationGenerator) Generate() error {
-
defaultImports := o.GenOpts.defaultImports()
apiPackage := o.GenOpts.LanguageOpts.ManglePackagePath(o.GenOpts.APIPackage, defaultOperationsTarget)
@@ -164,6 +165,7 @@ func (o *operationGenerator) Generate() error {
Imports: imports,
DefaultScheme: o.DefaultScheme,
Doc: o.Doc,
+ PristineDefs: o.Doc.Pristine(),
Analyzed: o.Analyzed,
BasePath: o.BasePath,
GenOpts: o.GenOpts,
@@ -223,7 +225,7 @@ type codeGenOpBuilder struct {
Target string
Operation spec.Operation
Doc *loads.Document
- PristineDoc *loads.Document
+ PristineDefs *loads.Document
Analyzed *analysis.Spec
DefaultImports map[string]string
Imports map[string]string
@@ -245,12 +247,24 @@ func paramMappings(params map[string]spec.Parameter) (map[string]map[string]stri
"header": make(map[string]string, len(params)),
"body": make(map[string]string, len(params)),
}
+ debugLog("paramMappings: map=%v", params)
// In order to avoid unstable generation, adopt same naming convention
// for all parameters with same name across locations.
- seenIds := make(map[string]interface{}, len(params))
+ seenIDs := make(map[string]interface{}, len(params))
for id, p := range params {
- if val, ok := seenIds[p.Name]; ok {
+ debugLog("paramMappings: params: id=%s, In=%q, Name=%q", id, p.In, p.Name)
+ // guard against possible validation failures and/or skipped issues
+ if _, found := idMapping[p.In]; !found {
+ log.Printf(`warning: parameter named %q has an invalid "in": %q. Skipped`, p.Name, p.In)
+ continue
+ }
+ if p.Name == "" {
+ log.Printf(`warning: unnamed parameter (%+v). Skipped`, p)
+ continue
+ }
+
+ if val, ok := seenIDs[p.Name]; ok {
previous := val.(struct{ id, in string })
idMapping[p.In][p.Name] = swag.ToGoName(id)
// rewrite the previously found one
@@ -258,11 +272,11 @@ func paramMappings(params map[string]spec.Parameter) (map[string]map[string]stri
} else {
idMapping[p.In][p.Name] = swag.ToGoName(p.Name)
}
- seenIds[strings.ToLower(idMapping[p.In][p.Name])] = struct{ id, in string }{id: id, in: p.In}
+ seenIDs[strings.ToLower(idMapping[p.In][p.Name])] = struct{ id, in string }{id: id, in: p.In}
}
// pick a deconflicted private name for timeout for this operation
- timeoutName := renameTimeout(seenIds, "timeout")
+ timeoutName := renameTimeout(seenIDs, "timeout")
return idMapping, timeoutName
}
@@ -272,12 +286,12 @@ func paramMappings(params map[string]spec.Parameter) (map[string]map[string]stri
//
// NOTE: this merely protects the timeout field in the client parameter struct,
// fields "Context" and "HTTPClient" remain exposed to name conflicts.
-func renameTimeout(seenIds map[string]interface{}, timeoutName string) string {
- if seenIds == nil {
+func renameTimeout(seenIDs map[string]interface{}, timeoutName string) string {
+ if seenIDs == nil {
return timeoutName
}
current := strings.ToLower(timeoutName)
- if _, ok := seenIds[current]; !ok {
+ if _, ok := seenIDs[current]; !ok {
return timeoutName
}
var next string
@@ -297,7 +311,7 @@ func renameTimeout(seenIds map[string]interface{}, timeoutName string) string {
default:
next = timeoutName + "1"
}
- return renameTimeout(seenIds, next)
+ return renameTimeout(seenIDs, next)
}
func (b *codeGenOpBuilder) MakeOperation() (GenOperation, error) {
@@ -325,7 +339,6 @@ func (b *codeGenOpBuilder) MakeOperation() (GenOperation, error) {
for _, p := range paramsForOperation {
cp, err := b.MakeParameter(receiver, resolver, p, idMapping)
-
if err != nil {
return GenOperation{}, err
}
@@ -417,10 +430,7 @@ func (b *codeGenOpBuilder) MakeOperation() (GenOperation, error) {
originalExtraSchemes := getExtraSchemes(operation.Extensions)
produces := producesOrDefault(operation.Produces, swsp.Produces, b.DefaultProduces)
- sort.Strings(produces)
-
consumes := producesOrDefault(operation.Consumes, swsp.Consumes, b.DefaultConsumes)
- sort.Strings(consumes)
var successResponse *GenResponse
for _, resp := range successResponses {
@@ -718,7 +728,12 @@ func (b *codeGenOpBuilder) MakeParameter(receiver string, resolver *typeResolver
b.Method, b.Path, param.Name, goName)
}
} else if len(idMapping) > 0 {
- id = idMapping[param.In][param.Name]
+ id, ok = idMapping[param.In][param.Name]
+ if !ok {
+ // skipped parameter
+ return GenParameter{}, fmt.Errorf(`%s %s, %q has an invalid parameter definition`,
+ b.Method, b.Path, param.Name)
+ }
}
res := GenParameter{
@@ -739,6 +754,16 @@ func (b *codeGenOpBuilder) MakeParameter(receiver string, resolver *typeResolver
Extensions: param.Extensions,
}
+ if goCustomTag, ok := param.Extensions["x-go-custom-tag"]; ok {
+ customTag, ok := goCustomTag.(string)
+ if !ok {
+ return GenParameter{}, fmt.Errorf(`%s %s, parameter %q: "x-go-custom-tag" field must be a string, not a %T`,
+ b.Method, b.Path, param.Name, goCustomTag)
+ }
+
+ res.CustomTag = customTag
+ }
+
if param.In == "body" {
// Process parameters declared in body (i.e. have a Schema)
res.Required = param.Required
@@ -763,7 +788,7 @@ func (b *codeGenOpBuilder) MakeParameter(receiver string, resolver *typeResolver
return GenParameter{}, err
}
res.Child = &pi
- // Propagates HasValidations from from child array
+ // Propagates HasValidations from child array
hasChildValidations = pi.HasValidations
}
res.IsNullable = !param.Required && !param.AllowEmptyValue
@@ -964,7 +989,6 @@ func (b *codeGenOpBuilder) setBodyParamValidation(p *GenParameter) {
p.HasModelBodyMap = hasModelBodyMap
p.HasSimpleBodyMap = hasSimpleBodyMap
}
-
}
// makeSecuritySchemes produces a sorted list of security schemes for this operation
@@ -1012,10 +1036,7 @@ func (b *codeGenOpBuilder) cloneSchema(schema *spec.Schema) *spec.Schema {
// This uses a deep clone the spec document to construct a type resolver which knows about definitions when the making of this operation started,
// and only these definitions. We are not interested in the "original spec", but in the already transformed spec.
func (b *codeGenOpBuilder) saveResolveContext(resolver *typeResolver, schema *spec.Schema) (*typeResolver, *spec.Schema) {
- if b.PristineDoc == nil {
- b.PristineDoc = b.Doc.Pristine()
- }
- rslv := newTypeResolver(b.GenOpts.LanguageOpts.ManglePackageName(resolver.ModelsPackage, defaultModelsTarget), b.DefaultImports[b.ModelsPackage], b.PristineDoc)
+ rslv := newTypeResolver(b.GenOpts.LanguageOpts.ManglePackageName(resolver.ModelsPackage, defaultModelsTarget), b.DefaultImports[b.ModelsPackage], b.PristineDefs)
return rslv, b.cloneSchema(schema)
}
@@ -1226,11 +1247,19 @@ func (b *codeGenOpBuilder) analyzeTags() (string, []string, bool) {
// conflict with "operations" package is handled separately
tag = renameOperationPackage(intersected, tag)
}
+
+ if matches := versionedPkgRex.FindStringSubmatch(tag); len(matches) > 2 {
+ // rename packages like "v1", "v2" ... as they hold a special meaning for go
+ tag = "version" + matches[2]
+ }
+
b.APIPackage = b.GenOpts.LanguageOpts.ManglePackageName(tag, b.APIPackage) // actual package name
b.APIPackageAlias = deconflictTag(intersected, b.APIPackage) // deconflicted import alias
return tag, intersected, len(filter) == 0 || len(filter) > 0 && len(intersected) > 0
}
+var versionedPkgRex = regexp.MustCompile(`(?i)^(v)([0-9]+)$`)
+
func maxInt(a, b int) int {
if a > b {
return a
@@ -1268,6 +1297,7 @@ func deconflictPkg(pkg string, renamer func(string) string) string {
case "tls", "http", "fmt", "strings", "log", "flags", "pflag", "json", "time":
return renamer(pkg)
}
+
return pkg
}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/shared.go b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/shared.go
index 5e2c2cee2f6..75ed251fe59 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/shared.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/shared.go
@@ -68,15 +68,21 @@ func DefaultSectionOpts(gen *GenOpts) {
FileName: "{{ (snakize (pascalize .Name)) }}.go",
},
}
- if gen.IncludeCLi {
- opts = append(opts, TemplateOpts{
+ sec.Models = opts
+ }
+
+ if len(sec.PostModels) == 0 && gen.IncludeCLi {
+ // For CLI, we need to postpone the generation of model-supporting source,
+ // in order for go imports to run properly in all cases.
+ opts := []TemplateOpts{
+ {
Name: "clidefinitionhook",
Source: "asset:cliModelcli",
Target: "{{ joinFilePath .Target (toPackagePath .CliPackage) }}",
FileName: "{{ (snakize (pascalize .Name)) }}_model.go",
- })
+ },
}
- sec.Models = opts
+ sec.PostModels = opts
}
if len(sec.Operations) == 0 {
@@ -228,7 +234,6 @@ func DefaultSectionOpts(gen *GenOpts) {
Target: "{{ joinFilePath .Target (toPackagePath .ServerPackage) }}",
FileName: "auto_configure_{{ (snakize (pascalize .Name)) }}.go",
})
-
} else {
opts = append(opts, TemplateOpts{
Name: "configure",
@@ -242,7 +247,6 @@ func DefaultSectionOpts(gen *GenOpts) {
}
}
gen.Sections = sec
-
}
// MarkdownOpts for rendering a spec as markdown
@@ -255,6 +259,7 @@ func MarkdownOpts() *LanguageOpts {
// MarkdownSectionOpts for a given opts and output file.
func MarkdownSectionOpts(gen *GenOpts, output string) {
gen.Sections.Models = nil
+ gen.Sections.PostModels = nil
gen.Sections.OperationGroups = nil
gen.Sections.Operations = nil
gen.LanguageOpts = MarkdownOpts()
@@ -275,7 +280,7 @@ type TemplateOpts struct {
Target string `mapstructure:"target"`
FileName string `mapstructure:"file_name"`
SkipExists bool `mapstructure:"skip_exists"`
- SkipFormat bool `mapstructure:"skip_format"`
+	SkipFormat  bool   `mapstructure:"skip_format"` // not a feature, but for debugging. Generated code before formatting might not work because of unused imports.
}
// SectionOpts allows for specifying options to customize the templates used for generation
@@ -284,6 +289,7 @@ type SectionOpts struct {
Operations []TemplateOpts `mapstructure:"operations"`
OperationGroups []TemplateOpts `mapstructure:"operation_groups"`
Models []TemplateOpts `mapstructure:"models"`
+ PostModels []TemplateOpts `mapstructure:"post_models"`
}
// GenOptsCommon the options for the generator
@@ -344,6 +350,7 @@ type GenOptsCommon struct {
AllowEnumCI bool
StrictResponders bool
AcceptDefinitionsOnly bool
+ WantsRootedErrorPath bool
templates *Repository // a shallow clone of the global template repository
}
@@ -356,7 +363,7 @@ func (g *GenOpts) CheckOpts() error {
if !filepath.IsAbs(g.Target) {
if _, err := filepath.Abs(g.Target); err != nil {
- return fmt.Errorf("could not locate target %s: %v", g.Target, err)
+ return fmt.Errorf("could not locate target %s: %w", g.Target, err)
}
}
@@ -602,11 +609,11 @@ func (g *GenOpts) render(t *TemplateOpts, data interface{}) ([]byte, error) {
}
content, err := os.ReadFile(templateFile)
if err != nil {
- return nil, fmt.Errorf("error while opening %s template file: %v", templateFile, err)
+ return nil, fmt.Errorf("error while opening %s template file: %w", templateFile, err)
}
tt, err := template.New(t.Source).Funcs(FuncMapFunc(g.LanguageOpts)).Parse(string(content))
if err != nil {
- return nil, fmt.Errorf("template parsing failed on template %s: %v", t.Name, err)
+ return nil, fmt.Errorf("template parsing failed on template %s: %w", t.Name, err)
}
templ = tt
}
@@ -617,7 +624,7 @@ func (g *GenOpts) render(t *TemplateOpts, data interface{}) ([]byte, error) {
var tBuf bytes.Buffer
if err := templ.Execute(&tBuf, data); err != nil {
- return nil, fmt.Errorf("template execution failed for template %s: %v", t.Name, err)
+ return nil, fmt.Errorf("template execution failed for template %s: %w", t.Name, err)
}
log.Printf("executed template %s", t.Source)
@@ -631,7 +638,7 @@ func (g *GenOpts) render(t *TemplateOpts, data interface{}) ([]byte, error) {
func (g *GenOpts) write(t *TemplateOpts, data interface{}) error {
dir, fname, err := g.location(t, data)
if err != nil {
- return fmt.Errorf("failed to resolve template location for template %s: %v", t.Name, err)
+ return fmt.Errorf("failed to resolve template location for template %s: %w", t.Name, err)
}
if t.SkipExists && fileExists(dir, fname) {
@@ -643,7 +650,7 @@ func (g *GenOpts) write(t *TemplateOpts, data interface{}) error {
log.Printf("creating generated file %q in %q as %s", fname, dir, t.Name)
content, err := g.render(t, data)
if err != nil {
- return fmt.Errorf("failed rendering template data for %s: %v", t.Name, err)
+ return fmt.Errorf("failed rendering template data for %s: %w", t.Name, err)
}
if dir != "" {
@@ -652,7 +659,7 @@ func (g *GenOpts) write(t *TemplateOpts, data interface{}) error {
debugLog("creating directory %q for \"%s\"", dir, t.Name)
// Directory settings consistent with file privileges.
// Environment's umask may alter this setup
- if e := os.MkdirAll(dir, 0755); e != nil {
+ if e := os.MkdirAll(dir, 0o755); e != nil {
return e
}
}
@@ -666,18 +673,18 @@ func (g *GenOpts) write(t *TemplateOpts, data interface{}) error {
formatted, err = g.LanguageOpts.FormatContent(filepath.Join(dir, fname), content)
if err != nil {
log.Printf("source formatting failed on template-generated source (%q for %s). Check that your template produces valid code", filepath.Join(dir, fname), t.Name)
- writeerr = os.WriteFile(filepath.Join(dir, fname), content, 0644) // #nosec
+ writeerr = os.WriteFile(filepath.Join(dir, fname), content, 0o644) // #nosec
if writeerr != nil {
- return fmt.Errorf("failed to write (unformatted) file %q in %q: %v", fname, dir, writeerr)
+ return fmt.Errorf("failed to write (unformatted) file %q in %q: %w", fname, dir, writeerr)
}
log.Printf("unformatted generated source %q has been dumped for template debugging purposes. DO NOT build on this source!", fname)
- return fmt.Errorf("source formatting on generated source %q failed: %v", t.Name, err)
+ return fmt.Errorf("source formatting on generated source %q failed: %w", t.Name, err)
}
}
- writeerr = os.WriteFile(filepath.Join(dir, fname), formatted, 0644) // #nosec
+ writeerr = os.WriteFile(filepath.Join(dir, fname), formatted, 0o644) // #nosec
if writeerr != nil {
- return fmt.Errorf("failed to write file %q in %q: %v", fname, dir, writeerr)
+ return fmt.Errorf("failed to write file %q in %q: %w", fname, dir, writeerr)
}
return err
}
@@ -713,6 +720,20 @@ func (g *GenOpts) renderApplication(app *GenApp) error {
return err
}
}
+
+ if len(g.Sections.PostModels) > 0 {
+ log.Printf("post-rendering from %d models", len(app.Models))
+ for _, templateToPin := range g.Sections.PostModels {
+ templateConfig := templateToPin
+ for _, modelToPin := range app.Models {
+ modelData := modelToPin
+ if err := g.write(&templateConfig, modelData); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
return nil
}
@@ -1069,7 +1090,7 @@ func dumpData(data interface{}) error {
if err != nil {
return err
}
- fmt.Fprintln(os.Stdout, string(bb))
+ fmt.Fprintln(os.Stdout, string(bb)) // TODO(fred): not testable
return nil
}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/spec.go b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/spec.go
index e7399bb9544..df3528a62ea 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/spec.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/spec.go
@@ -41,8 +41,11 @@ func (g *GenOpts) validateAndFlattenSpec() (*loads.Document, error) {
if validationErrors != nil {
str := fmt.Sprintf("The swagger spec at %q is invalid against swagger specification %s. see errors :\n",
g.Spec, specDoc.Version())
- for _, desc := range validationErrors.(*swaggererrors.CompositeError).Errors {
- str += fmt.Sprintf("- %s\n", desc)
+ var cerr *swaggererrors.CompositeError
+ if errors.As(validationErrors, &cerr) {
+ for _, desc := range cerr.Errors {
+ str += fmt.Sprintf("- %s\n", desc)
+ }
}
return nil, errors.New(str)
}
@@ -84,6 +87,16 @@ func (g *GenOpts) validateAndFlattenSpec() (*loads.Document, error) {
return nil, err
}
+ if g.FlattenOpts.Expand {
+ // for a similar reason as the one mentioned above for validate,
+ // schema expansion alters the internal doc cache in the spec.
+ // This nasty bug (in spec expander) affects circular references.
+ // So we need to reload the spec from a clone.
+ // Notice that since the spec inside the document has been modified, we should
+	// ensure that Pristine refreshes its raw root document.
+ specDoc = specDoc.Pristine()
+ }
+
// yields the preprocessed spec document
return specDoc, nil
}
@@ -229,7 +242,7 @@ func WithAutoXOrder(specPath string) string {
}
tmpFile := filepath.Join(tmpDir, filepath.Base(specPath))
- if err := os.WriteFile(tmpFile, out, 0600); err != nil {
+ if err := os.WriteFile(tmpFile, out, 0o600); err != nil {
panic(err)
}
return tmpFile
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/structs.go b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/structs.go
index 522be1446a1..14522888184 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/structs.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/structs.go
@@ -20,6 +20,7 @@ import (
type GenCommon struct {
Copyright string
TargetImportPath string
+ RootedErrorPath bool // wants array and map types to have a path corresponding to their type in reported errors
}
// GenDefinition contains all the properties to generate a
@@ -85,6 +86,8 @@ type GenSchema struct {
HasBaseType bool
IsSubType bool
IsExported bool
+ IsElem bool // IsElem gives some context when the schema is part of an array or a map
+ IsProperty bool // IsProperty gives some context when the schema is a property of an object
DiscriminatorField string
DiscriminatorValue string
Discriminates map[string]string
@@ -96,6 +99,7 @@ type GenSchema struct {
StructTags []string
ExtraImports map[string]string // non-standard imports detected when using external types
ExternalDocs *spec.ExternalDocumentation
+ WantsRootedErrorPath bool
}
func (g GenSchema) renderMarshalTag() string {
@@ -361,6 +365,8 @@ type GenParameter struct {
CollectionFormat string
+ CustomTag string
+
Child *GenItems
Parent *GenItems
@@ -514,6 +520,8 @@ type GenOperationGroup struct {
RootPackage string
GenOpts *GenOpts
PackageAlias string
+
+ ClientOptions *GenClientOptions
}
// GenOperationGroups is a sorted collection of operation groups
@@ -801,3 +809,10 @@ type GenSecurityRequirements []GenSecurityRequirement
func (g GenSecurityRequirements) Len() int { return len(g) }
func (g GenSecurityRequirements) Swap(i, j int) { g[i], g[j] = g[j], g[i] }
func (g GenSecurityRequirements) Less(i, j int) bool { return g[i].Name < g[j].Name }
+
+// GenClientOptions holds extra pieces of information
+// to generate a client.
+type GenClientOptions struct {
+	ProducesMediaTypes []string // filled with all producers if any method has more than 1
+	ConsumesMediaTypes []string // filled with all consumers if any method has more than 1
+}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/support.go b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/support.go
index df3996df492..3794ee1a31d 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/support.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/support.go
@@ -58,6 +58,9 @@ func GenerateMarkdown(output string, modelNames, operationIDs []string, opts *Ge
if err := opts.EnsureDefaults(); err != nil {
return err
}
+ if opts.Target != "" && opts.Target != "." {
+ output = filepath.Join(opts.Target, output)
+ }
MarkdownSectionOpts(opts, output)
generator, err := newAppGenerator("", modelNames, operationIDs, opts)
@@ -184,7 +187,7 @@ func (a *appGenerator) Generate() error {
}
// optional OperationGroups templates generation
if err := a.GenOpts.renderOperationGroup(&opg); err != nil {
- return fmt.Errorf("error while rendering operation group: %v", err)
+ return fmt.Errorf("error while rendering operation group: %w", err)
}
}
}
@@ -217,11 +220,13 @@ func (a *appGenerator) GenerateSupport(ap *GenApp) error {
app.DefaultImports[pkgAlias] = serverPath
app.ServerPackageAlias = pkgAlias
- // add client import for cli generation
- clientPath := path.Join(baseImport,
- a.GenOpts.LanguageOpts.ManglePackagePath(a.ClientPackage, defaultClientTarget))
- clientPkgAlias := importAlias(clientPath)
- app.DefaultImports[clientPkgAlias] = clientPath
+ if a.GenOpts.IncludeCLi { // no need to add this import when there is no CLI
+ // add client import for cli generation
+ clientPath := path.Join(baseImport,
+ a.GenOpts.LanguageOpts.ManglePackagePath(a.ClientPackage, defaultClientTarget))
+ clientPkgAlias := importAlias(clientPath)
+ app.DefaultImports[clientPkgAlias] = clientPath
+ }
return a.GenOpts.renderApplication(app)
}
@@ -262,9 +267,11 @@ func (a *appGenerator) makeCodegenApp() (GenApp, error) {
imports := make(map[string]string, 50)
alias := deconflictPkg(a.GenOpts.LanguageOpts.ManglePackageName(a.OperationsPackage, defaultOperationsTarget), renameAPIPackage)
- imports[alias] = path.Join(
- baseImport,
- a.GenOpts.LanguageOpts.ManglePackagePath(a.OperationsPackage, defaultOperationsTarget))
+ if !a.GenOpts.IsClient { // we don't want to inject this import for clients
+ imports[alias] = path.Join(
+ baseImport,
+ a.GenOpts.LanguageOpts.ManglePackagePath(a.OperationsPackage, defaultOperationsTarget))
+ }
implAlias := ""
if a.GenOpts.ImplementationPackage != "" {
@@ -284,7 +291,7 @@ func (a *appGenerator) makeCodegenApp() (GenApp, error) {
a.GenOpts,
)
if err != nil {
- return GenApp{}, fmt.Errorf("error in model %s while planning definitions: %v", mn, err)
+ return GenApp{}, fmt.Errorf("error in model %s while planning definitions: %w", mn, err)
}
if model != nil {
if !model.External {
@@ -304,6 +311,10 @@ func (a *appGenerator) makeCodegenApp() (GenApp, error) {
log.Printf("planning operations (found: %d)", len(a.Operations))
genOps := make(GenOperations, 0, len(a.Operations))
+ consumesIndex := make(map[string][]string)
+ producesIndex := make(map[string][]string)
+ pristineDoc := a.SpecDoc.Pristine()
+
for operationName, opp := range a.Operations {
o := opp.Op
o.ID = operationName
@@ -316,6 +327,7 @@ func (a *appGenerator) makeCodegenApp() (GenApp, error) {
Imports: imports,
DefaultScheme: a.DefaultScheme,
Doc: a.SpecDoc,
+ PristineDefs: pristineDoc,
Analyzed: a.Analyzed,
BasePath: a.SpecDoc.BasePath(),
GenOpts: a.GenOpts,
@@ -355,7 +367,18 @@ func (a *appGenerator) makeCodegenApp() (GenApp, error) {
op.ReceiverName = receiver
op.Tags = tags // ordered tags for this operation, possibly filtered by CLI params
- genOps = append(genOps, op)
+
+ allConsumes := pruneEmpty(op.ConsumesMediaTypes)
+ if bldr.DefaultConsumes != "" {
+ allConsumes = append(allConsumes, bldr.DefaultConsumes)
+ }
+ consumesIndex[bldr.Name] = allConsumes
+
+ allProduces := pruneEmpty(op.ProducesMediaTypes)
+ if bldr.DefaultProduces != "" {
+ allProduces = append(allProduces, bldr.DefaultProduces)
+ }
+ producesIndex[bldr.Name] = allProduces
if !a.GenOpts.SkipTagPackages && tag != "" {
importPath := filepath.ToSlash(
@@ -364,8 +387,19 @@ func (a *appGenerator) makeCodegenApp() (GenApp, error) {
a.GenOpts.LanguageOpts.ManglePackagePath(a.OperationsPackage, defaultOperationsTarget),
a.GenOpts.LanguageOpts.ManglePackageName(bldr.APIPackage, defaultOperationsTarget),
))
+
+ // check for possible conflicts that requires import aliasing
+ pth, aliasUsed := defaultImports[bldr.APIPackageAlias]
+ if (a.GenOpts.IsClient && bldr.APIPackageAlias == a.GenOpts.ClientPackage) || // we don't want import to shadow the current package
+ (a.GenOpts.IncludeCLi && bldr.APIPackageAlias == a.GenOpts.CliPackage) ||
+ (aliasUsed && pth != importPath) { // was already imported with a different target
+ op.PackageAlias = renameOperationPackage(tags, bldr.APIPackageAlias)
+ bldr.APIPackageAlias = op.PackageAlias
+ }
defaultImports[bldr.APIPackageAlias] = importPath
}
+
+ genOps = append(genOps, op)
}
sort.Sort(genOps)
@@ -378,8 +412,12 @@ func (a *appGenerator) makeCodegenApp() (GenApp, error) {
opGroups := make(GenOperationGroups, 0, len(opsGroupedByPackage))
for k, v := range opsGroupedByPackage {
- log.Printf("operations for package packages %q (found: %d)", k, len(v))
+ log.Printf("operations for package %q (found: %d)", k, len(v))
sort.Sort(v)
+
+ consumesInGroup := make([]string, 0, 2)
+ producesInGroup := make([]string, 0, 2)
+
// trim duplicate extra schemas within the same package
vv := make(GenOperations, 0, len(v))
seenExtraSchema := make(map[string]bool)
@@ -393,6 +431,9 @@ func (a *appGenerator) makeCodegenApp() (GenApp, error) {
}
op.ExtraSchemas = uniqueExtraSchemas
vv = append(vv, op)
+
+ consumesInGroup = concatUnique(consumesInGroup, consumesIndex[op.Name])
+ producesInGroup = concatUnique(producesInGroup, producesIndex[op.Name])
}
var pkg string
if len(vv) > 0 {
@@ -414,6 +455,19 @@ func (a *appGenerator) makeCodegenApp() (GenApp, error) {
RootPackage: a.APIPackage,
GenOpts: a.GenOpts,
}
+
+ if a.GenOpts.IsClient {
+ // generating extra options to switch media type in client
+ if len(consumesInGroup) > 1 || len(producesInGroup) > 1 {
+ sort.Strings(producesInGroup)
+ sort.Strings(consumesInGroup)
+ options := &GenClientOptions{
+ ProducesMediaTypes: producesInGroup,
+ ConsumesMediaTypes: consumesInGroup,
+ }
+ opGroup.ClientOptions = options
+ }
+ }
opGroups = append(opGroups, opGroup)
}
sort.Sort(opGroups)
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/template_repo.go b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/template_repo.go
index e78ae602ac0..2c377372d50 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/template_repo.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/template_repo.go
@@ -4,6 +4,7 @@ import (
"bytes"
"encoding/json"
"fmt"
+ "log"
"math"
"os"
"path"
@@ -16,8 +17,6 @@ import (
"text/template/parse"
"unicode"
- "log"
-
"github.com/Masterminds/sprig/v3"
"github.com/go-openapi/inflect"
"github.com/go-openapi/runtime"
@@ -94,6 +93,7 @@ func DefaultFuncMap(lang *LanguageOpts) template.FuncMap {
"inspect": pretty.Sprint,
"cleanPath": path.Clean,
"mediaTypeName": mediaMime,
+ "mediaGoName": mediaGoName,
"arrayInitializer": lang.arrayInitializer,
"hasPrefix": strings.HasPrefix,
"stringContains": strings.Contains,
@@ -134,9 +134,55 @@ func DefaultFuncMap(lang *LanguageOpts) template.FuncMap {
},
"docCollectionFormat": resolvedDocCollectionFormat,
"trimSpace": strings.TrimSpace,
+ "mdBlock": markdownBlock, // markdown block
"httpStatus": httpStatus,
"cleanupEnumVariant": cleanupEnumVariant,
"gt0": gt0,
+ "path": errorPath,
+ "cmdName": func(in interface{}) (string, error) {
+ // builds the name of a CLI command for a single operation
+ op, isOperation := in.(GenOperation)
+ if !isOperation {
+ ptr, ok := in.(*GenOperation)
+ if !ok {
+ return "", fmt.Errorf("cmdName should be called on a GenOperation, but got: %T", in)
+ }
+ op = *ptr
+ }
+ name := "Operation" + pascalize(op.Package) + pascalize(op.Name) + "Cmd"
+
+ return name, nil // TODO
+ },
+ "cmdGroupName": func(in interface{}) (string, error) {
+ // builds the name of a group of CLI commands
+ opGroup, ok := in.(GenOperationGroup)
+ if !ok {
+ return "", fmt.Errorf("cmdGroupName should be called on a GenOperationGroup, but got: %T", in)
+ }
+ name := "GroupOfOperations" + pascalize(opGroup.Name) + "Cmd"
+
+ return name, nil // TODO
+ },
+ "flagNameVar": func(in string) string {
+ // builds a flag name variable in CLI commands
+ return fmt.Sprintf("flag%sName", pascalize(in))
+ },
+ "flagValueVar": func(in string) string {
+ // builds a flag value variable in CLI commands
+ return fmt.Sprintf("flag%sValue", pascalize(in))
+ },
+ "flagDefaultVar": func(in string) string {
+ // builds a flag default value variable in CLI commands
+ return fmt.Sprintf("flag%sDefault", pascalize(in))
+ },
+ "flagModelVar": func(in string) string {
+ // builds a flag model variable in CLI commands
+ return fmt.Sprintf("flag%sModel", pascalize(in))
+ },
+ "flagDescriptionVar": func(in string) string {
+ // builds a flag description variable in CLI commands
+ return fmt.Sprintf("flag%sDescription", pascalize(in))
+ },
}
for k, v := range extra {
@@ -327,7 +373,6 @@ func (t *Repository) ShallowClone() *Repository {
// LoadDefaults will load the embedded templates
func (t *Repository) LoadDefaults() {
-
for name, asset := range assets {
if err := t.addFile(name, string(asset), true); err != nil {
log.Fatal(err)
@@ -337,26 +382,27 @@ func (t *Repository) LoadDefaults() {
// LoadDir will walk the specified path and add each .gotmpl file it finds to the repository
func (t *Repository) LoadDir(templatePath string) error {
- err := filepath.Walk(templatePath, func(path string, info os.FileInfo, err error) error {
-
+ err := filepath.Walk(templatePath, func(path string, _ os.FileInfo, err error) error {
if strings.HasSuffix(path, ".gotmpl") {
if assetName, e := filepath.Rel(templatePath, path); e == nil {
if data, e := os.ReadFile(path); e == nil {
if ee := t.AddFile(assetName, string(data)); ee != nil {
- return fmt.Errorf("could not add template: %v", ee)
+ return fmt.Errorf("could not add template: %w", ee)
}
}
// Non-readable files are skipped
}
}
+
if err != nil {
return err
}
+
// Non-template files are skipped
return nil
})
if err != nil {
- return fmt.Errorf("could not complete template processing in directory \"%s\": %v", templatePath, err)
+ return fmt.Errorf("could not complete template processing in directory \"%s\": %w", templatePath, err)
}
return nil
}
@@ -392,9 +438,8 @@ func (t *Repository) addFile(name, data string, allowOverride bool) error {
name = swag.ToJSONName(strings.TrimSuffix(name, ".gotmpl"))
templ, err := template.New(name).Funcs(t.funcs).Parse(data)
-
if err != nil {
- return fmt.Errorf("failed to load template %s: %v", name, err)
+ return fmt.Errorf("failed to load template %s: %w", name, err)
}
// check if any protected templates are defined
@@ -441,7 +486,6 @@ func (t *Repository) SetAllowOverride(value bool) {
}
func findDependencies(n parse.Node) []string {
-
var deps []string
depMap := make(map[string]bool)
@@ -491,7 +535,6 @@ func findDependencies(n parse.Node) []string {
}
return deps
-
}
func (t *Repository) flattenDependencies(templ *template.Template, dependencies map[string]bool) map[string]bool {
@@ -516,11 +559,9 @@ func (t *Repository) flattenDependencies(templ *template.Template, dependencies
}
return dependencies
-
}
func (t *Repository) addDependencies(templ *template.Template) (*template.Template, error) {
-
name := templ.Name()
deps := t.flattenDependencies(templ, nil)
@@ -545,9 +586,8 @@ func (t *Repository) addDependencies(templ *template.Template) (*template.Templa
// Add it to the parse tree
templ, err = templ.AddParseTree(dep, tt.Tree)
-
if err != nil {
- return templ, fmt.Errorf("dependency error: %v", err)
+ return templ, fmt.Errorf("dependency error: %w", err)
}
}
@@ -576,7 +616,6 @@ func (t *Repository) DumpTemplates() {
fmt.Fprintf(buf, "Defined in `%s`\n", t.files[name])
if deps := findDependencies(templ.Tree.Root); len(deps) > 0 {
-
fmt.Fprintf(buf, "####requires \n - %v\n\n\n", strings.Join(deps, "\n - "))
}
fmt.Fprintln(buf, "\n---")
@@ -853,3 +892,99 @@ func gt0(in *int64) bool {
// with a pointer
return in != nil && *in > 0
}
+
+func errorPath(in interface{}) (string, error) {
+ // For schemas:
+	// errorPath returns an empty string literal when the schema path is empty.
+ // It provides a shorthand for template statements such as:
+ // {{ if .Path }}{{ .Path }}{{ else }}" "{{ end }},
+ // which becomes {{ path . }}
+ //
+ // When called for a GenParameter, GenResponse or GenOperation object, it just
+ // returns Path.
+ //
+	// Extra behavior for schemas, when the generation option RootedErrorPath is enabled:
+ // In the case of arrays with an empty path, it adds the type name as the path "root",
+ // so consumers of reported errors get an idea of the originator.
+
+ var pth string
+ rooted := func(schema GenSchema) string {
+ if schema.WantsRootedErrorPath && schema.Path == "" && (schema.IsArray || schema.IsMap) {
+ return `"[` + schema.Name + `]"`
+ }
+
+ return schema.Path
+ }
+
+ switch schema := in.(type) {
+ case GenSchema:
+ pth = rooted(schema)
+ case *GenSchema:
+ if schema == nil {
+ break
+ }
+ pth = rooted(*schema)
+ case GenDefinition:
+ pth = rooted(schema.GenSchema)
+ case *GenDefinition:
+ if schema == nil {
+ break
+ }
+ pth = rooted(schema.GenSchema)
+ case GenParameter:
+ pth = schema.Path
+
+ // unchanged Path if called with other types
+ case *GenParameter:
+ if schema == nil {
+ break
+ }
+ pth = schema.Path
+ case GenResponse:
+ pth = schema.Path
+ case *GenResponse:
+ if schema == nil {
+ break
+ }
+ pth = schema.Path
+ case GenOperation:
+ pth = schema.Path
+ case *GenOperation:
+ if schema == nil {
+ break
+ }
+ pth = schema.Path
+ case GenItems:
+ pth = schema.Path
+ case *GenItems:
+ if schema == nil {
+ break
+ }
+ pth = schema.Path
+ case GenHeader:
+ pth = schema.Path
+ case *GenHeader:
+ if schema == nil {
+ break
+ }
+ pth = schema.Path
+ default:
+ return "", fmt.Errorf("errorPath should be called with GenSchema or GenDefinition, but got %T", schema)
+ }
+
+ if pth == "" {
+ return `""`, nil
+ }
+
+ return pth, nil
+}
+
+const mdNewLine = ""
+
+var mdNewLineReplacer = strings.NewReplacer("\r\n", mdNewLine, "\n", mdNewLine, "\r", mdNewLine)
+
+func markdownBlock(in string) string {
+ in = strings.TrimSpace(in)
+
+ return mdNewLineReplacer.Replace(in)
+}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/cli.gotmpl b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/cli.gotmpl
index 3d88c5bebd2..073e33cfaff 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/cli.gotmpl
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/cli.gotmpl
@@ -1,42 +1,45 @@
// Code generated by go-swagger; DO NOT EDIT.
-
{{ if .Copyright -}}// {{ comment .Copyright -}}{{ end }}
-
package {{ .GenOpts.CliPackage }}
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
- {{ imports .DefaultImports }}
- {{ imports .Imports }}
-
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
- "github.com/go-openapi/runtime"
- "github.com/go-openapi/swag"
- httptransport "github.com/go-openapi/runtime/client"
- homedir "github.com/mitchellh/go-homedir"
+ "log"
+ "os"
+ "path"
+ "path/filepath"
+
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+ {{ imports .DefaultImports }}
+ {{ imports .Imports }}
)
-// debug flag indicating that cli should output debug logs
-var debug bool
-// config file location
-var configFile string
-// dry run flag
-var dryRun bool
+var (
+ // debug flag indicating that cli should output debug logs
+ debug bool
-// name of the executable
-var exeName string = filepath.Base(os.Args[0])
+ // config file location
+ configFile string
+
+ // dry run flag
+ dryRun bool
+
+ // name of the executable
+ exeName = filepath.Base(os.Args[0])
+)
// logDebugf writes debug log to stdout
func logDebugf(format string, v ...interface{}) {
- if !debug{
- return
- }
- log.Printf(format, v...)
+ if !debug{
+ return
+ }
+ log.Printf(format, v...)
}
{{/*TODO: make this a swagger cli option*/}}
@@ -44,199 +47,240 @@ func logDebugf(format string, v ...interface{}) {
var maxDepth int = 5
// makeClient constructs a client object
-func makeClient(cmd *cobra.Command, args []string) (*client.{{ pascalize .Name }}, error) {
- hostname := viper.GetString("hostname")
- viper.SetDefault("base_path", client.DefaultBasePath)
- basePath := viper.GetString("base_path")
- scheme := viper.GetString("scheme")
-
- r := httptransport.New(hostname, basePath, []string{scheme})
- r.SetDebug(debug)
-
- {{- /* user might define custom mediatype xxx/json and there is no registered ones to handle. */}}
- // set custom producer and consumer to use the default ones
- {{ range .Consumes }}
- {{ range .AllSerializers }}
- {{- if stringContains .MediaType "json" }}
- r.Consumers["{{ .MediaType }}"] = runtime.JSONConsumer()
- {{- else }}
- // warning: consumes {{ .MediaType }} is not supported by go-swagger cli yet
- {{- end }}
- {{- end }}
- {{ end }}
- {{ range .Produces }}
- {{- range .AllSerializers }}
- {{- if stringContains .MediaType "json" }}
- r.Producers["{{ .MediaType }}"] = runtime.JSONProducer()
- {{- else }}
- // warning: produces {{ .MediaType }} is not supported by go-swagger cli yet
- {{- end }}
- {{- end }}
- {{ end }}
-
- {{- if .SecurityDefinitions }}
- auth, err := makeAuthInfoWriter(cmd)
- if err != nil {
- return nil, err
- }
- r.DefaultAuthentication = auth
- {{ end }}
- appCli := client.New(r, strfmt.Default)
- logDebugf("Server url: %v://%v", scheme, hostname)
- return appCli, nil
+func makeClient(cmd *cobra.Command, _ []string) (*client.{{ pascalize .Name }}, error) {
+ hostname := viper.GetString("hostname")
+ viper.SetDefault("base_path", client.DefaultBasePath)
+ basePath := viper.GetString("base_path")
+ scheme := viper.GetString("scheme")
+
+ r := httptransport.New(hostname, basePath, []string{scheme})
+ r.SetDebug(debug)
+
+ {{- /* user might define custom mediatype xxx/json and there is no registered ones to handle. */}}
+ // set custom producer and consumer to use the default ones
+ {{ range .Consumes }}
+ {{ range .AllSerializers }}
+ {{- if stringContains .MediaType "json" }}
+ r.Consumers["{{ .MediaType }}"] = runtime.JSONConsumer()
+ {{- else }}
+ // warning: consumes {{ .MediaType }} is not supported by go-swagger cli yet
+ {{- end }}
+ {{- end }}
+ {{ end }}
+ {{ range .Produces }}
+ {{- range .AllSerializers }}
+ {{- if stringContains .MediaType "json" }}
+ r.Producers["{{ .MediaType }}"] = runtime.JSONProducer()
+ {{- else }}
+ // warning: produces {{ .MediaType }} is not supported by go-swagger cli yet
+ {{- end }}
+ {{- end }}
+ {{- end }}
+
+ {{- if .SecurityDefinitions }}
+
+ auth, err := makeAuthInfoWriter(cmd)
+ if err != nil {
+ return nil, err
+ }
+ r.DefaultAuthentication = auth
+ {{- end }}
+
+ appCli := client.New(r, strfmt.Default)
+ logDebugf("Server url: %v://%v", scheme, hostname)
+
+ return appCli, nil
}
// MakeRootCmd returns the root cmd
func MakeRootCmd() (*cobra.Command, error) {
- cobra.OnInitialize(initViperConfigs)
-
- // Use executable name as the command name
- rootCmd := &cobra.Command{
- Use: exeName,
- }
- {{/*note: viper binded flag value must be retrieved from viper rather than cmd*/}}
- // register basic flags
- rootCmd.PersistentFlags().String("hostname", client.DefaultHost, "hostname of the service")
- viper.BindPFlag("hostname", rootCmd.PersistentFlags().Lookup("hostname"))
- rootCmd.PersistentFlags().String("scheme", client.DefaultSchemes[0], fmt.Sprintf("Choose from: %v", client.DefaultSchemes))
- viper.BindPFlag("scheme", rootCmd.PersistentFlags().Lookup("scheme"))
- rootCmd.PersistentFlags().String("base-path", client.DefaultBasePath, fmt.Sprintf("For example: %v", client.DefaultBasePath))
- viper.BindPFlag("base_path", rootCmd.PersistentFlags().Lookup("base-path"))
-
- // configure debug flag
- rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "output debug logs")
- // configure config location
- rootCmd.PersistentFlags().StringVar(&configFile, "config", "", "config file path")
- // configure dry run flag
- rootCmd.PersistentFlags().BoolVar(&dryRun, "dry-run", false, "do not send the request to server")
-
- // register security flags
- {{- if .SecurityDefinitions }}
- if err := registerAuthInoWriterFlags(rootCmd); err != nil{
- return nil, err
- }
- {{- end }}
- // add all operation groups
-{{- range .OperationGroups -}}
- {{- $operationGroupCmdVarName := printf "operationGroup%vCmd" (pascalize .Name) }}
- {{ $operationGroupCmdVarName }}, err := makeOperationGroup{{ pascalize .Name }}Cmd()
- if err != nil {
- return nil, err
- }
- rootCmd.AddCommand({{ $operationGroupCmdVarName }})
-{{ end }}
+ cobra.OnInitialize(initViperConfigs)
+
+ // Use executable name as the command name
+ rootCmd := &cobra.Command{
+ Use: exeName,
+ }
+	{{/*note: viper-bound flag value must be retrieved from viper rather than cmd*/}}
+ // register basic flags
+ rootCmd.PersistentFlags().String("hostname", client.DefaultHost, "hostname of the service")
+ if err := viper.BindPFlag("hostname", rootCmd.PersistentFlags().Lookup("hostname")) ; err != nil {
+ return nil, err
+ }
+ rootCmd.PersistentFlags().String("scheme", client.DefaultSchemes[0], fmt.Sprintf("Choose from: %v", client.DefaultSchemes))
+ if err := viper.BindPFlag("scheme", rootCmd.PersistentFlags().Lookup("scheme")) ; err != nil {
+ return nil, err
+ }
+ rootCmd.PersistentFlags().String("base-path", client.DefaultBasePath, fmt.Sprintf("For example: %v", client.DefaultBasePath))
+ if err := viper.BindPFlag("base_path", rootCmd.PersistentFlags().Lookup("base-path")) ; err != nil {
+ return nil, err
+ }
+
+ // configure debug flag
+ rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "output debug logs")
+ // configure config location
+ rootCmd.PersistentFlags().StringVar(&configFile, "config", "", "config file path")
+ // configure dry run flag
+ rootCmd.PersistentFlags().BoolVar(&dryRun, "dry-run", false, "do not send the request to server")
+
+ // register security flags
+ {{- if .SecurityDefinitions }}
+ if err := registerAuthInoWriterFlags(rootCmd); err != nil{
+ return nil, err
+ }
+ {{- end }}
+
+ // add all operation groups
+{{- range $index,$element := .OperationGroups }}
+ c{{ $index }}, err := make{{ cmdGroupName $element }}()
+ if err != nil {
+ return nil, err
+ }
+ rootCmd.AddCommand(c{{ $index}})
+{{- end }}
- // add cobra completion
- rootCmd.AddCommand(makeGenCompletionCmd())
+ // add cobra completion
+ rootCmd.AddCommand(makeGenCompletionCmd())
- return rootCmd, nil
+ return rootCmd, nil
}
// initViperConfigs initialize viper config using config file in '$HOME/.config//config.'
// currently hostname, scheme and auth tokens can be specified in this config file.
func initViperConfigs() {
- if configFile != "" {
- // use user specified config file location
- viper.SetConfigFile(configFile)
- }else{
- // look for default config
- // Find home directory.
- home, err := homedir.Dir()
- cobra.CheckErr(err)
-
- // Search config in home directory with name ".cobra" (without extension).
- viper.AddConfigPath(path.Join(home, ".config", exeName))
- viper.SetConfigName("config")
- }
-
- if err := viper.ReadInConfig(); err != nil {
- logDebugf("Error: loading config file: %v", err)
- return
- }
- logDebugf("Using config file: %v", viper.ConfigFileUsed())
+ if configFile != "" {
+ // use user specified config file location
+ viper.SetConfigFile(configFile)
+ } else{
+ var (
+ configDir string
+ err error
+ )
+
+ // look for default config (OS-specific, e.g. ".config" on linux)
+ configDir, err = os.UserConfigDir()
+ if err != nil {
+ // fallback and try finding the home directory.
+ home, err := os.UserHomeDir()
+ cobra.CheckErr(err)
+ configDir = path.Join(home, ".config")
+ }
+
+ // Search config in the config directory with name of the CLI binary (without extension).
+ configDir = path.Join(configDir, exeName)
+ viper.AddConfigPath(configDir)
+ viper.SetConfigName("config")
+ }
+
+ if err := viper.ReadInConfig(); err != nil {
+ logDebugf("Error: loading config file: %v", err)
+ return
+ }
+ logDebugf("Using config file: %v", viper.ConfigFileUsed())
}
{{- if .SecurityDefinitions }}
-{{- /*youyuan: rework this since spec may define multiple auth schemes.
- cli needs to detect which one user passed rather than add all of them.*/}}
+{{- /*youyuan: rework this since spec may define multiple auth schemes.
+ cli needs to detect which one user passed rather than add all of them.*/}}
+
// registerAuthInoWriterFlags registers all flags needed to perform authentication
func registerAuthInoWriterFlags(cmd *cobra.Command) error {
{{- range .SecurityDefinitions }}
- /*{{.Name}} {{.Description}}*/
- {{- if .IsBasicAuth }}
- cmd.PersistentFlags().String("username", "", "username for basic auth")
- viper.BindPFlag("username", cmd.PersistentFlags().Lookup("username"))
- cmd.PersistentFlags().String("password", "", "password for basic auth")
- viper.BindPFlag("password", cmd.PersistentFlags().Lookup("password"))
- {{- end }}
- {{- if .IsAPIKeyAuth }}
- cmd.PersistentFlags().String("{{.Name}}", "", `{{.Description}}`)
- viper.BindPFlag("{{.Name}}", cmd.PersistentFlags().Lookup("{{.Name}}"))
- {{- end }}
- {{- if .IsOAuth2 }}
- // oauth2: let user provide the token in a flag, rather than implement the logic to fetch the token.
- cmd.PersistentFlags().String("oauth2-token", "", `{{.Description}}`)
- viper.BindPFlag("oauth2-token", cmd.PersistentFlags().Lookup("oauth2-token"))
- {{- end }}
-{{- end }}
- return nil
+ // {{.Name}}
+ {{- if .Description }}
+ {{- comment .Description }}
+ {{- end }}
+ {{- if .IsBasicAuth }}
+ cmd.PersistentFlags().String("username", "", "username for basic auth")
+ if err := viper.BindPFlag("username", cmd.PersistentFlags().Lookup("username")) ; err != nil {
+ return err
+ }
+ cmd.PersistentFlags().String("password", "", "password for basic auth")
+ if err := viper.BindPFlag("password", cmd.PersistentFlags().Lookup("password")) ; err != nil {
+ return err
+ }
+ {{- end }}
+ {{- if .IsAPIKeyAuth }}
+ cmd.PersistentFlags().String("{{.Name}}", "", `{{.Description}}`)
+ if err := viper.BindPFlag("{{.Name}}", cmd.PersistentFlags().Lookup("{{.Name}}")) ; err != nil {
+ return err
+ }
+ {{- end }}
+ {{- if .IsOAuth2 }}
+ // oauth2: let user provide the token in a flag, rather than implement the logic to fetch the token.
+ cmd.PersistentFlags().String("oauth2-token", "", `{{.Description}}`)
+ if err := viper.BindPFlag("oauth2-token", cmd.PersistentFlags().Lookup("oauth2-token")) ; err != nil {
+ return err
+ }
+ {{- end }}
+{{ end }}
+
+ return nil
}
// makeAuthInfoWriter retrieves cmd flags and construct an auth info writer
func makeAuthInfoWriter(cmd *cobra.Command) (runtime.ClientAuthInfoWriter, error) {
- auths := []runtime.ClientAuthInfoWriter{}
+ auths := []runtime.ClientAuthInfoWriter{}
{{- range .SecurityDefinitions }}
- /*{{.Name}} {{.Description}}*/
- {{- if .IsBasicAuth }}
- if viper.IsSet("username") {
- usr := viper.GetString("username")
- if !viper.IsSet("password"){
- return nil, fmt.Errorf("Basic Auth password for user [%v] is not provided.", usr)
- }
- pwd := viper.GetString("password")
- auths = append(auths, httptransport.BasicAuth(usr,pwd))
- }
- {{- end }}
- {{- if .IsAPIKeyAuth }}
- if viper.IsSet("{{.Name}}") {
- {{ pascalize .Name }}Key := viper.GetString("{{.Name}}")
- auths = append(auths, httptransport.APIKeyAuth("{{.Name}}", "{{.In}}", {{ pascalize .Name }}Key))
- }
- {{- end }}
- {{- if .IsOAuth2 }}
- if viper.IsSet("oauth2-token") {
- // oauth2 workflow for generated CLI is not ideal.
- // If you have suggestions on how to support it, raise an issue here: https://github.com/go-swagger/go-swagger/issues
- // This will be added to header: "Authorization: Bearer {oauth2-token value}"
- token := viper.GetString("oauth2-token")
- auths = append(auths, httptransport.BearerToken(token))
+
+ // {{.Name}}
+ {{- if .Description }}
+ {{- comment .Description }}
+ {{- end }}
+ {{- if .IsBasicAuth }}
+ if viper.IsSet("username") {
+ usr := viper.GetString("username")
+ if !viper.IsSet("password"){
+ return nil, fmt.Errorf("Basic Auth password for user [%v] is not provided.", usr)
}
- {{- end }}
+ pwd := viper.GetString("password")
+ auths = append(auths, httptransport.BasicAuth(usr,pwd))
+ }
+ {{- end }}
+ {{- if .IsAPIKeyAuth }}
+ if viper.IsSet("{{.Name}}") {
+ {{ pascalize .Name }}Key := viper.GetString("{{.Name}}")
+ auths = append(auths, httptransport.APIKeyAuth("{{.Name}}", "{{.In}}", {{ pascalize .Name }}Key))
+ }
+ {{- end }}
+ {{- if .IsOAuth2 }}
+ if viper.IsSet("oauth2-token") {
+ // oauth2 workflow for generated CLI is not ideal.
+ // If you have suggestions on how to support it, raise an issue here: https://github.com/go-swagger/go-swagger/issues
+ // This will be added to header: "Authorization: Bearer {oauth2-token value}"
+ token := viper.GetString("oauth2-token")
+ auths = append(auths, httptransport.BearerToken(token))
+ }
+ {{- end }}
{{- end }}
- if len(auths) == 0 {
- logDebugf("Warning: No auth params detected.")
- return nil, nil
- }
- // compose all auths together
- return httptransport.Compose(auths...), nil
+
+ if len(auths) == 0 {
+ logDebugf("Warning: No auth params detected.")
+ return nil, nil
+ }
+
+ // compose all auths together
+ return httptransport.Compose(auths...), nil
}
{{- end }}
-{{ range .OperationGroups -}}
-func makeOperationGroup{{ pascalize .Name }}Cmd() (*cobra.Command, error) {
- {{- $operationGroupCmdVarName := printf "operationGroup%vCmd" (pascalize .Name) }}
- {{ $operationGroupCmdVarName }} := &cobra.Command{
- Use: "{{ .Name }}",
- Long: `{{ .Description }}`,
- }
-{{ range .Operations }}
- {{- $operationCmdVarName := printf "operation%vCmd" (pascalize .Name) }}
- {{ $operationCmdVarName }}, err := makeOperation{{pascalize .Package}}{{ pascalize .Name }}Cmd()
- if err != nil {
- return nil, err
- }
- {{ $operationGroupCmdVarName }}.AddCommand({{ $operationCmdVarName }})
-{{ end }}
- return {{ $operationGroupCmdVarName }}, nil
+{{- range .OperationGroups -}}
+
+// make{{ cmdGroupName . }} returns a parent command to handle all operations with tag {{ printf "%q" .Name }}
+func make{{ cmdGroupName . }}() (*cobra.Command, error) {
+ parent := &cobra.Command{
+ Use: "{{ .Name }}",
+ Long: `{{ .Description }}`,
+ }
+
+ {{- range $index,$element := .Operations }}
+
+ sub{{ $index }}, err := make{{ cmdName $element }}()
+ if err != nil {
+ return nil, err
+ }
+ parent.AddCommand(sub{{ $index }})
+ {{- end }}
+
+ return parent, nil
}
-{{ end }} {{/*operation group*/}}
+{{- end }} {{/*operation group*/}}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/main.gotmpl b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/main.gotmpl
index 6cc470a2f4d..e94e617e508 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/main.gotmpl
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/main.gotmpl
@@ -1,28 +1,29 @@
// Code generated by go-swagger; DO NOT EDIT.
-
{{ if .Copyright -}}// {{ comment .Copyright -}}{{ end }}
-
package main
import (
- "encoding/json"
- {{ imports .DefaultImports }}
- {{ imports .Imports }}
+ "fmt"
+ "os"
+
+ {{ imports .DefaultImports }}
+ {{ imports .Imports }}
)
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
func main() {
- rootCmd,err := cli.MakeRootCmd()
+ rootCmd, err := cli.MakeRootCmd()
if err != nil {
- fmt.Println("Cmd construction error: ", err)
+ fmt.Println("cmd construction error: ", err)
os.Exit(1)
}
-
- if err := rootCmd.Execute(); err != nil {
+
+ if err = rootCmd.Execute(); err != nil {
+ fmt.Println("cmd execute error: ", err)
os.Exit(1)
}
-}
\ No newline at end of file
+}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/modelcli.gotmpl b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/modelcli.gotmpl
index d93e91d41f6..49b106bf96d 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/modelcli.gotmpl
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/modelcli.gotmpl
@@ -11,9 +11,12 @@ package cli
import (
- {{ imports .DefaultImports }}
- {{ imports .Imports }}
- "github.com/spf13/cobra"
+ "encoding/json"
+ "fmt"
+
+ "github.com/spf13/cobra"
+ {{ imports .DefaultImports }}
+ {{ imports .Imports }}
)
// Schema cli for {{.GoType}}
@@ -22,4 +25,4 @@ import (
{{ range .ExtraSchemas }}
// Extra schema cli for {{.GoType}}
{{ template "modelschemacli" .}}
-{{ end }}
\ No newline at end of file
+{{ end }}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/operation.gotmpl b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/operation.gotmpl
index 10666ed783a..dc09dac07e9 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/operation.gotmpl
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/operation.gotmpl
@@ -1,26 +1,26 @@
// Code generated by go-swagger; DO NOT EDIT.
-
{{ if .Copyright -}}// {{ comment .Copyright -}}{{ end }}
-{{- /*TODO: do not hardcode cli pkg*/}}
-package cli
+package cli {{/* TODO: do not hardcode cli pkg */}}
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
- {{ imports .DefaultImports }}
- {{ imports .Imports }}
+ "fmt"
+
+ {{ imports .DefaultImports }}
+ {{ imports .Imports }}
- "github.com/spf13/cobra"
- "github.com/go-openapi/runtime"
- "github.com/go-openapi/swag"
- httptransport "github.com/go-openapi/runtime/client"
+ "github.com/spf13/cobra"
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/swag"
+ httptransport "github.com/go-openapi/runtime/client"
)
-// makeOperation{{pascalize .Package}}{{ pascalize .Name }}Cmd returns a cmd to handle operation {{ camelize .Name }}
-func makeOperation{{pascalize .Package}}{{ pascalize .Name }}Cmd() (*cobra.Command, error) {
+// make{{ cmdName . }} returns a command to handle operation {{ camelize .Name }}
+func make{{ cmdName . }}() (*cobra.Command, error) {
cmd := &cobra.Command{
Use: "{{ .Name }}",
Short: `{{ escapeBackticks .Description}}`,
@@ -46,12 +46,11 @@ func runOperation{{pascalize $operationGroup }}{{ pascalize $operation }}(cmd *c
// retrieve flag values from cmd and fill params
params := {{ .PackageAlias }}.New{{ pascalize .Name}}Params()
{{- range .Params }}
- if err, _ := retrieveOperation{{pascalize $operationGroup }}{{ pascalize $operation }}{{ pascalize .Name }}Flag(params, "", cmd); err != nil{
+ if err, _ = retrieveOperation{{pascalize $operationGroup }}{{ pascalize $operation }}{{ pascalize .Name }}Flag(params, "", cmd); err != nil{
return err
}
{{- end }} {{/*Params*/}}
- if dryRun {
- {{/* Note: dry run is not very useful for now, but useful when validation is added in future*/}}
+ if dryRun { {{/* Note: dry run is not very useful for now, but useful when validation is added in future*/}}
logDebugf("dry-run flag specified. Skip sending request.")
return nil
}
@@ -61,10 +60,11 @@ func runOperation{{pascalize $operationGroup }}{{ pascalize $operation }}(cmd *c
if err != nil {
return err
}
- if !debug{
- {{/* In debug mode content should have been printed in transport layer, so do not print again*/}}
+
+ if !debug{ {{/* In debug mode content should have been printed in transport layer, so do not print again*/}}
fmt.Println(msgStr)
}
+
return nil
}
@@ -77,9 +77,9 @@ func registerOperation{{pascalize $operationGroup }}{{ pascalize $operation }}Pa
{{- end }}
return nil
}
-
{{/*register functions for each fields in this operation*/}}
{{- range .Params }}
+
func registerOperation{{pascalize $operationGroup }}{{ pascalize $operation }}{{pascalize .Name }}ParamFlags(cmdPrefix string, cmd *cobra.Command) error{
{{- if .IsPrimitive }}
{{ template "primitiveregistrator" . }}
@@ -96,12 +96,12 @@ func registerOperation{{pascalize $operationGroup }}{{ pascalize $operation }}{{
}
{{- end }}
-{{/*functions to retreive each field of params*/}}
+{{/*functions to retrieve each field of params*/}}
{{- range .Params }}
+
func retrieveOperation{{pascalize $operationGroup }}{{ pascalize $operation }}{{ pascalize .Name }}Flag(m *{{ $operationPkgAlias }}.{{ pascalize $operation }}Params, cmdPrefix string, cmd *cobra.Command) (error,bool){
retAdded := false
{{- $flagStr := .Name }}
- {{- $flagValueVar := printf "%vValue" (camelize .Name) }}
{{- /*only set the param if user set the flag*/}}
if cmd.Flags().Changed("{{ $flagStr }}") {
{{- if .IsPrimitive }}
@@ -113,16 +113,16 @@ func retrieveOperation{{pascalize $operationGroup }}{{ pascalize $operation }}{{
{{- else if and .IsBodyParam .Schema .IsComplexObject (not .IsStream) }}
{{- /*schema payload can be passed in cmd as a string and here is unmarshalled to model struct and attached in params*/}}
// Read {{ $flagStr }} string from cmd and unmarshal
- {{ $flagValueVar }}Str, err := cmd.Flags().GetString("{{ $flagStr }}")
+ {{ flagValueVar .Name }}Str, err := cmd.Flags().GetString("{{ $flagStr }}")
if err != nil {
return err, false
}
{{/*Note anonymous body schema is not pointer*/}}
- {{ $flagValueVar }} := {{if containsPkgStr .GoType}}{{ .GoType }}{{else}}{{ .Pkg }}.{{.GoType}}{{ end }}{}
- if err := json.Unmarshal([]byte({{ $flagValueVar }}Str), &{{ $flagValueVar }}); err!= nil{
+ {{ flagValueVar .Name }} := {{if containsPkgStr .GoType}}{{ .GoType }}{{else}}{{ .Pkg }}.{{.GoType}}{{ end }}{}
+ if err := json.Unmarshal([]byte({{ flagValueVar .Name }}Str), &{{ flagValueVar .Name }}); err!= nil{
return fmt.Errorf("cannot unmarshal {{ $flagStr }} string in {{.GoType}}: %v", err), false
}
- m.{{ .ID }} = {{- if .IsNullable }}&{{- end }}{{ $flagValueVar }}
+ m.{{ .ID }} = {{- if .IsNullable }}&{{- end }}{{ flagValueVar .Name }}
{{- else }}
// warning: {{.GoType}} is not supported by go-swagger cli yet
{{- end }} {{/*end go type case*/}}
@@ -131,32 +131,32 @@ func retrieveOperation{{pascalize $operationGroup }}{{ pascalize $operation }}{{
{{- /* Add flags to capture fields in Body. If previously Body struct was constructed in unmarshalling body string,
then reuse the struct, otherwise construct an empty value struct to fill. Here body field flags overwrites
unmarshalled body string values. */}}
- {{- $flagModelVar := printf "%vModel" (camelize $flagValueVar) }}
- {{ $flagModelVar }} := m.{{ .ID }}
- if swag.IsZero({{ $flagModelVar }}){
- {{ $flagModelVar }} = {{- if .IsNullable }}&{{- end }}{{if containsPkgStr .GoType}}{{ .GoType }}{{else}}{{ .Pkg }}.{{.GoType}}{{ end }}{}
+ {{ flagModelVar .Name }} := m.{{ .ID }}
+ if swag.IsZero({{ flagModelVar .Name }}){
+ {{ flagModelVar .Name }} = {{- if .IsNullable }}&{{- end }}{{if containsPkgStr .GoType}}{{ .GoType }}{{else}}{{ .Pkg }}.{{.GoType}}{{ end }}{}
}
{{- /*Only attach the body struct in params if user passed some flag filling some body fields.*/}}
- {{- /* add "&" to $flagModelVar when it is not nullable because the retrieve method always expects a pointer */}}
- err, added := retrieveModel{{ pascalize (dropPackage .GoType) }}Flags(0, {{if not .IsNullable}}&{{end}}{{ $flagModelVar }}, "{{ camelize (dropPackage .GoType) }}", cmd)
+ {{- /* add "&" to flagModelVar .Name when it is not nullable because the retrieve method always expects a pointer */}}
+ err, added := retrieveModel{{ pascalize (dropPackage .GoType) }}Flags(0, {{if not .IsNullable}}&{{end}}{{ flagModelVar .Name }}, "{{ camelize (dropPackage .GoType) }}", cmd)
if err != nil{
return err, false
}
if added {
- m.{{.ID}} = {{ $flagModelVar }}
+ m.{{.ID}} = {{ flagModelVar .Name }}
}
- if dryRun && debug {
- {{/* dry run we don't get trasnport debug strings, so print it here*/}}
- {{- $bodyDebugVar := printf "%vDebugBytes" (camelize $flagValueVar) }}
+
+	if dryRun && debug { {{/* dry run we don't get transport debug strings, so print it here*/}}
+ {{- $bodyDebugVar := printf "%vDebugBytes" (flagValueVar .Name) }}
{{ $bodyDebugVar }}, err := json.Marshal(m.{{.ID}})
if err != nil{
return err, false
}
logDebugf("{{.ID }} dry-run payload: %v", string({{ $bodyDebugVar }}))
}
- retAdded = retAdded || added
- {{/*body debug string will be printed in transport layer*/}}
+
+ retAdded = retAdded || added {{/*body debug string will be printed in transport layer*/}}
{{- end }}
+
return nil, retAdded
}
{{- end }} {{/*Params*/}}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/registerflag.gotmpl b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/registerflag.gotmpl
index 6378111550d..fb0dba04b84 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/registerflag.gotmpl
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/registerflag.gotmpl
@@ -8,34 +8,32 @@
{{- if .Enum }}
{{- $fullDescription = printf "Enum: %v. %v" (json .Enum) $fullDescription}}
{{- end }}
- {{ camelize .Name }}Description := `{{ $fullDescription }}`
+ {{ flagDescriptionVar .Name }} := `{{ $fullDescription }}`
{{ end }}
{{ define "flagnamevar" }}
- {{- $flagNameVar := printf "%vFlagName" (camelize .Name) }}
- var {{ $flagNameVar }} string
+ var {{ flagNameVar .Name }} string
if cmdPrefix == "" {
- {{ $flagNameVar }} = "{{ .Name }}"
+ {{ flagNameVar .Name }} = "{{ .Name }}"
}else{
- {{ $flagNameVar }} = fmt.Sprintf("%v.{{ .Name }}", cmdPrefix)
+ {{ flagNameVar .Name }} = fmt.Sprintf("%v.{{ .Name }}", cmdPrefix)
}
{{ end }}
{{ define "flagdefaultvar" }}
- {{ $defaultVar := printf "%vFlagDefault" (camelize .Name) }}
- var {{ $defaultVar}} {{ .GoType }} {{ if .Default }}= {{ printf "%#v" .Default }}{{ end }}
+ var {{ flagDefaultVar .Name }} {{ .GoType }} {{ if .Default }}= {{ printf "%#v" .Default }}{{ end }}
{{ end }}
{{/* Not used. CLI does not mark flag as required, and required will be checked by validation in future */}}
{{/* {{ define "requiredregistrator" }}
- if err := cmd.MarkPersistentFlagRequired({{ camelize .Name }}FlagName); err != nil{
+ if err := cmd.MarkPersistentFlagRequired({{ flagNameVar .Name }}); err != nil{
return err
}
{{ end }} */}}
{{ define "enumcompletion" }} {{/*only used for primitive types. completion type is always string.*/}}
{{ if .Enum }}
-if err := cmd.RegisterFlagCompletionFunc({{ camelize .Name }}FlagName,
+if err := cmd.RegisterFlagCompletionFunc({{ flagNameVar .Name }},
func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
var res []string
if err := json.Unmarshal([]byte(`{{ json .Enum }}`), &res); err != nil {
@@ -54,12 +52,12 @@ if err := cmd.RegisterFlagCompletionFunc({{ camelize .Name }}FlagName,
{{ template "flagdescriptionvar" . }}
{{ template "flagnamevar" . }}
{{ template "flagdefaultvar" . }}
- _ = cmd.PersistentFlags().{{ pascalize .GoType }}({{ camelize .Name }}FlagName, {{ camelize .Name }}FlagDefault, {{ (camelize .Name) }}Description)
+ _ = cmd.PersistentFlags().{{ pascalize .GoType }}({{ flagNameVar .Name }}, {{ flagDefaultVar .Name }}, {{ flagDescriptionVar .Name }})
{{ template "enumcompletion" . }}
- {{- else if or (eq .GoType "strfmt.DateTime") (eq .GoType "strfmt.UUID") (eq .GoType "strfmt.ObjectId") }} {{/* read as string */}}
+ {{- else if or (eq .GoType "strfmt.DateTime") (eq .GoType "strfmt.UUID") (eq .GoType "strfmt.ObjectId") (eq .GoType "strfmt.ULID") }} {{/* read as string */}}
{{ template "flagdescriptionvar" . }}
{{ template "flagnamevar" . }}
- _ = cmd.PersistentFlags().String({{ camelize .Name }}FlagName, "", {{ (camelize .Name) }}Description)
+ _ = cmd.PersistentFlags().String({{ flagNameVar .Name }}, "", {{ flagDescriptionVar .Name }})
{{ template "enumcompletion" . }}
{{- else }}
// warning: primitive {{.Name}} {{.GoType }} is not supported by go-swagger cli yet
@@ -71,12 +69,12 @@ if err := cmd.RegisterFlagCompletionFunc({{ camelize .Name }}FlagName,
{{ template "flagdescriptionvar" . }}
{{ template "flagnamevar" . }}
{{ template "flagdefaultvar" . }}
- _ = cmd.PersistentFlags().{{ pascalize .GoType }}Slice({{ camelize .Name }}FlagName, {{ camelize .Name }}FlagDefault, {{ (camelize .Name) }}Description)
+ _ = cmd.PersistentFlags().{{ pascalize .GoType }}Slice({{ flagNameVar .Name }}, {{ flagDefaultVar .Name }}, {{ flagDescriptionVar .Name }})
{{ template "enumcompletion" . }}
- {{- else if or (eq .GoType "[]strfmt.DateTime") (eq .GoType "[]strfmt.UUID") (eq .GoType "[]strfmt.ObjectId") }} {{/* read as string */}}
+ {{- else if or (eq .GoType "[]strfmt.DateTime") (eq .GoType "[]strfmt.UUID") (eq .GoType "[]strfmt.ObjectId") (eq .GoType "[]strfmt.ULID") }} {{/* read as string */}}
{{ template "flagdescriptionvar" . }}
{{ template "flagnamevar" . }}
- _ = cmd.PersistentFlags().StringSlice({{ camelize .Name }}FlagName, []string{}, {{ (camelize .Name) }}Description)
+ _ = cmd.PersistentFlags().StringSlice({{ flagNameVar .Name }}, []string{}, {{ flagDescriptionVar .Name }})
{{- else }}
// warning: array {{.Name}} {{.GoType }} is not supported by go-swagger cli yet
{{- end }}
@@ -86,7 +84,7 @@ if err := cmd.RegisterFlagCompletionFunc({{ camelize .Name }}FlagName,
{{/* each body parameter gets a string flag to input json raw string */}}
{{ define "modelparamstringregistrator" }}
{{ template "flagnamevar" . }}
- _ = cmd.PersistentFlags().String({{ camelize .Name }}FlagName, "", "Optional json string for [{{ .Name }}]. {{ .Description }}")
+ _ = cmd.PersistentFlags().String({{ flagNameVar .Name }}, "", `Optional json string for [{{ .Name }}]. {{ escapeBackticks .Description }}`)
{{ end }}
{{ define "modelparamregistrator" }} {{/* register a param that has a schema */}}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/retrieveflag.gotmpl b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/retrieveflag.gotmpl
index a1ff1e5dee1..d702340ab2a 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/retrieveflag.gotmpl
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/retrieveflag.gotmpl
@@ -1,59 +1,55 @@
{{/*util functions to retrieve flags*/}}
{{ define "primitiveretriever" }}
- {{- $flagValueVar := printf "%vFlagValue" (camelize .Name) }}
- {{- $flagNameVar := printf "%vFlagName" (camelize .Name )}}
{{- if or (eq .GoType "int64") (eq .GoType "int32") (eq .GoType "string") (eq .GoType "float64") (eq .GoType "float32") (eq .GoType "bool") }}
{{ template "flagnamevar" . }}
- {{ $flagValueVar }}, err := cmd.Flags().Get{{pascalize .GoType}}({{ $flagNameVar }})
+ {{ flagValueVar .Name }}, err := cmd.Flags().Get{{pascalize .GoType}}({{ flagNameVar .Name }})
if err != nil{
return err, false
}
{{- /* reciever by convention is m for CLI */}}
- m.{{ pascalize .Name }} = {{- if .IsNullable }}&{{- end }}{{ $flagValueVar }}
+ m.{{ pascalize .Name }} = {{- if .IsNullable }}&{{- end }}{{ flagValueVar .Name }}
{{- else if or (eq .GoType "strfmt.DateTime") (eq .GoType "strfmt.ObjectId") (eq .GoType "strfmt.UUID" ) }} {{/*Get flag value as string, then parse it*/}}
{{/*Many of the strfmt types can be added here*/}}
{{ template "flagnamevar" . }}
- {{ $flagValueVar }}Str, err := cmd.Flags().GetString({{ $flagNameVar }})
+ {{ flagValueVar .Name }}Str, err := cmd.Flags().GetString({{ flagNameVar .Name }})
if err != nil{
return err, false
}
- var {{ $flagValueVar }} {{ .GoType }}
- if err := {{ $flagValueVar }}.UnmarshalText([]byte({{ $flagValueVar }}Str)); err != nil{
+ var {{ flagValueVar .Name }} {{ .GoType }}
+ if err := {{ flagValueVar .Name }}.UnmarshalText([]byte({{ flagValueVar .Name }}Str)); err != nil{
return err, false
}
- m.{{ pascalize .Name }} = {{- if .IsNullable }}&{{- end }}{{ $flagValueVar }}
+ m.{{ pascalize .Name }} = {{- if .IsNullable }}&{{- end }}{{ flagValueVar .Name }}
{{- else }}
// warning: primitive {{.Name}} {{.GoType }} is not supported by go-swagger cli yet
{{- end }}
{{ end }}
{{ define "arrayretriever" }}
- {{- $flagValueVar := printf "%vFlagValues" (camelize .Name) }}
- {{- $flagNameVar := printf "%vFlagName" (camelize .Name )}}
{{- if or (eq .GoType "[]int64") (eq .GoType "[]int32") (eq .GoType "[]string") (eq .GoType "[]float64") (eq .GoType "[]float32") (eq .GoType "[]bool") }}
{{ template "flagnamevar" . }}
- {{ $flagValueVar }}, err := cmd.Flags().Get{{pascalize .GoType}}Slice({{ $flagNameVar }})
+ {{ flagValueVar .Name }}, err := cmd.Flags().Get{{pascalize .GoType}}Slice({{ flagNameVar .Name }})
if err != nil{
return err, false
}
- {{- /* reciever by convention is m for CLI */}}
- m.{{ pascalize .Name }} = {{ $flagValueVar }}
+ {{- /* receiver by convention is m for CLI */}}
+ m.{{ pascalize .Name }} = {{ flagValueVar .Name }}
{{- else if or (eq .GoType "[]strfmt.DateTime") (eq .GoType "[]strfmt.ObjectId") (eq .GoType "[]strfmt.UUID") }} {{/*Get flag value as string, then parse it*/}}
{{ template "flagnamevar" . }}
- {{ $flagValueVar }}Str, err := cmd.Flags().GetStringSlice({{ $flagNameVar }})
+ {{ flagValueVar .Name }}Str, err := cmd.Flags().GetStringSlice({{ flagNameVar .Name }})
if err != nil{
return err, false
}
- {{ $flagValueVar }} := make({{ .GoType }}, len({{ $flagValueVar }}Str))
- for i, v := range {{ $flagValueVar }}Str {
- if err := {{ $flagValueVar }}[i].UnmarshalText([]byte(v)); err != nil{
+ {{ flagValueVar .Name }} := make({{ .GoType }}, len({{ flagValueVar .Name }}Str))
+ for i, v := range {{ flagValueVar .Name }}Str {
+ if err := {{ flagValueVar .Name }}[i].UnmarshalText([]byte(v)); err != nil{
return err, false
}
}
- m.{{ pascalize .Name }} = {{- if .IsNullable }}&{{- end }}{{ $flagValueVar }}
+ m.{{ pascalize .Name }} = {{- if .IsNullable }}&{{- end }}{{ flagValueVar .Name }}
{{- else }}
// warning: array {{.Name}} {{.GoType }} is not supported by go-swagger cli yet
{{- end }}
-{{ end }}
\ No newline at end of file
+{{ end }}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/schema.gotmpl b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/schema.gotmpl
index 2dc42aebc82..cd5b13ca56c 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/schema.gotmpl
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/cli/schema.gotmpl
@@ -14,12 +14,12 @@
{{- if .IsPrimitive }}
{{ template "primitiveregistrator" . }}
{{- else if .IsArray }}
- // warning: {{.Name}} {{ .GoType }} array type is not supported by go-swagger cli yet
+ // warning: {{.Name}} {{ .GoType }} array type is not supported by go-swagger cli yet
{{- else if .IsMap }}
// warning: {{.Name}} {{ .GoType }} map type is not supported by go-swagger cli yet
{{- else if .IsComplexObject }} {{/* struct case */}}
{{ template "flagnamevar" . }}
- if err := registerModel{{pascalize (dropPackage .GoType) }}Flags(depth + 1, {{ camelize .Name }}FlagName, cmd); err != nil{
+ if err := registerModel{{pascalize (dropPackage .GoType) }}Flags(depth + 1, {{ flagNameVar .Name }}, cmd); err != nil{
return err
}
{{- else }}
@@ -28,10 +28,8 @@
{{ end }}
{{ define "propertyretriever" }}
- {{- $flagNameVar := printf "%vFlagName" (camelize .Name) }}
- {{- $flagValueVar := printf "%vFlagValue" (camelize .Name) }}
- {{ $flagNameVar }} := fmt.Sprintf("%v.{{ .Name }}", cmdPrefix)
- if cmd.Flags().Changed({{ $flagNameVar }}) {
+ {{ flagNameVar .Name }} := fmt.Sprintf("%v.{{ .Name }}", cmdPrefix)
+ if cmd.Flags().Changed({{ flagNameVar .Name }}) {
{{- if .IsPrimitive }}
{{ template "primitiveretriever" . }}
retAdded = true
@@ -46,18 +44,18 @@
{{- end }}
}
{{- if and .IsComplexObject (not .IsArray) (not .IsMap) (not .IsStream) }}
- {{ $flagValueVar }} := m.{{pascalize .Name}}
- if swag.IsZero({{ $flagValueVar }}){
- {{ $flagValueVar }} = {{if .IsNullable }}&{{end}}{{if containsPkgStr .GoType}}{{ .GoType }}{{else}}{{ .Pkg }}.{{.GoType}}{{ end }}{}
+ {{ flagValueVar .Name }} := m.{{pascalize .Name}}
+ if swag.IsZero({{ flagValueVar .Name }}){
+ {{ flagValueVar .Name }} = {{if .IsNullable }}&{{end}}{{if containsPkgStr .GoType}}{{ .GoType }}{{else}}{{ .Pkg }}.{{.GoType}}{{ end }}{}
}
{{/* always lift the payload to pointer and pass to model retrieve function. If .GoType has pkg str, use it, else use .Pkg+.GoType */}}
- err, {{camelize .Name }}Added := retrieveModel{{pascalize (dropPackage .GoType) }}Flags(depth + 1, {{if not .IsNullable }}&{{end}}{{ $flagValueVar }}, {{ $flagNameVar }}, cmd)
+ err, {{pascalize .Name }}Added := retrieveModel{{pascalize (dropPackage .GoType) }}Flags(depth + 1, {{if not .IsNullable }}&{{end}}{{ flagValueVar .Name }}, {{ flagNameVar .Name }}, cmd)
if err != nil{
return err, false
}
- retAdded = retAdded || {{camelize .Name }}Added
- if {{camelize .Name }}Added {
- m.{{pascalize .Name}} = {{ $flagValueVar }}
+ retAdded = retAdded || {{pascalize .Name }}Added
+ if {{pascalize .Name }}Added {
+ m.{{pascalize .Name}} = {{ flagValueVar .Name }}
}
{{- end }}
{{ end }}
@@ -85,14 +83,14 @@ func registerModel{{pascalize .Name}}Flags(depth int, cmdPrefix string, cmd *cob
// register anonymous fields for {{.Name}}
{{ $anonName := .Name }}
{{ range .Properties }}
- if err := register{{ pascalize $modelName }}Anon{{pascalize $anonName }}{{ pascalize .Name }}(depth, cmdPrefix, cmd); err != nil{
+ if err := register{{ pascalize $modelName }}PropAnon{{pascalize $anonName }}{{ pascalize .Name }}(depth, cmdPrefix, cmd); err != nil{
return err
}
{{ end }}
{{ end }}
{{ end }}
{{ range .Properties }}
- if err := register{{ pascalize $modelName }}{{ pascalize .Name }}(depth, cmdPrefix, cmd); err != nil{
+ if err := register{{ pascalize $modelName }}Prop{{ pascalize .Name }}(depth, cmdPrefix, cmd); err != nil{
return err
}
{{ end }}
@@ -104,7 +102,7 @@ func registerModel{{pascalize .Name}}Flags(depth int, cmdPrefix string, cmd *cob
// inline definition name {{ .Name }}, type {{.GoType}}
{{ $anonName := .Name }}
{{ range .Properties }}
-func register{{ pascalize $modelName }}Anon{{pascalize $anonName }}{{ pascalize .Name }}(depth int, cmdPrefix string, cmd *cobra.Command) error {
+func register{{ pascalize $modelName }}PropAnon{{pascalize $anonName }}{{ pascalize .Name }}(depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
@@ -117,7 +115,7 @@ func register{{ pascalize $modelName }}Anon{{pascalize $anonName }}{{ pascalize
{{/*register functions for each fields in this model */}}
{{ range .Properties }}
-func register{{ pascalize $modelName }}{{ pascalize .Name }}(depth int, cmdPrefix string, cmd *cobra.Command) error{
+func register{{ pascalize $modelName }}Prop{{ pascalize .Name }}(depth int, cmdPrefix string, cmd *cobra.Command) error{
if depth > maxDepth {
return nil
}
@@ -133,11 +131,11 @@ func retrieveModel{{pascalize $modelName }}Flags(depth int, m *{{if containsPkgS
{{- if not .IsAnonymous }}{{/* named type composition */}}
{{ if or .IsPrimitive .IsComplexObject }}
// retrieve model {{.GoType}}
- err, {{camelize .Name }}Added := retrieveModel{{ pascalize (dropPackage .GoType) }}Flags(depth, &m.{{pascalize (dropPackage .GoType) }}, cmdPrefix, cmd)
+ err, {{pascalize .Name }}Added := retrieveModel{{ pascalize (dropPackage .GoType) }}Flags(depth, &m.{{pascalize (dropPackage .GoType) }}, cmdPrefix, cmd)
if err != nil{
return err, false
}
- retAdded = retAdded || {{camelize .Name }}Added
+ retAdded = retAdded || {{pascalize .Name }}Added
{{ else }} {{/*inline anonymous case*/}}
{{ end }}
@@ -145,20 +143,20 @@ func retrieveModel{{pascalize $modelName }}Flags(depth int, m *{{if containsPkgS
// retrieve allOf {{.Name}} fields
{{ $anonName := .Name }}
{{ range .Properties }}
- err, {{camelize .Name}}Added := retrieve{{ pascalize $modelName }}Anon{{pascalize $anonName }}{{ pascalize .Name }}Flags(depth, m, cmdPrefix, cmd)
+ err, {{pascalize .Name}}Added := retrieve{{ pascalize $modelName }}PropAnon{{pascalize $anonName }}{{ pascalize .Name }}Flags(depth, m, cmdPrefix, cmd)
if err != nil{
return err, false
}
- retAdded = retAdded || {{ camelize .Name }}Added
+ retAdded = retAdded || {{ pascalize .Name }}Added
{{ end }}
{{- end }}
{{ end }}
{{ range .Properties }}
- err, {{ camelize .Name }}Added := retrieve{{pascalize $modelName }}{{pascalize .Name }}Flags(depth, m, cmdPrefix, cmd)
+ err, {{ pascalize .Name }}Added := retrieve{{pascalize $modelName }}Prop{{pascalize .Name }}Flags(depth, m, cmdPrefix, cmd)
if err != nil{
return err, false
}
- retAdded = retAdded || {{ camelize .Name }}Added
+ retAdded = retAdded || {{ pascalize .Name }}Added
{{ end }}
return nil, retAdded
}
@@ -168,7 +166,7 @@ func retrieveModel{{pascalize $modelName }}Flags(depth int, m *{{if containsPkgS
// define retrieve functions for fields for inline definition name {{ .Name }}
{{ $anonName := .Name }}
{{ range .Properties }} {{/*anonymous fields will be registered directly on parent model*/}}
-func retrieve{{ pascalize $modelName }}Anon{{pascalize $anonName }}{{ pascalize .Name }}Flags(depth int, m *{{if containsPkgStr $modelType}}{{ $modelType }}{{else}}{{ $modelPkg }}.{{$modelType}}{{ end }},cmdPrefix string, cmd *cobra.Command) (error,bool) {
+func retrieve{{ pascalize $modelName }}PropAnon{{pascalize $anonName }}{{ pascalize .Name }}Flags(depth int, m *{{if containsPkgStr $modelType}}{{ $modelType }}{{else}}{{ $modelPkg }}.{{$modelType}}{{ end }},cmdPrefix string, cmd *cobra.Command) (error,bool) {
if depth > maxDepth {
return nil, false
}
@@ -181,7 +179,7 @@ func retrieve{{ pascalize $modelName }}Anon{{pascalize $anonName }}{{ pascalize
{{ end }}
{{ range .Properties }}
-func retrieve{{pascalize $modelName }}{{pascalize .Name }}Flags(depth int, m *{{if $modelPkg}}{{$modelPkg}}.{{ dropPackage $modelType }}{{else}}{{ $modelType }}{{end}}, cmdPrefix string, cmd *cobra.Command) (error, bool) {
+func retrieve{{pascalize $modelName }}Prop{{pascalize .Name }}Flags(depth int, m *{{if $modelPkg}}{{$modelPkg}}.{{ dropPackage $modelType }}{{else}}{{ $modelType }}{{end}}, cmdPrefix string, cmd *cobra.Command) (error, bool) {
if depth > maxDepth {
return nil, false
}
@@ -190,4 +188,4 @@ func retrieve{{pascalize $modelName }}{{pascalize .Name }}Flags(depth int, m *{{
return nil, retAdded
}
{{ end }} {{/*properties*/}}
-{{ end }} {{/*define*/}}
\ No newline at end of file
+{{ end }} {{/*define*/}}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/client/client.gotmpl b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/client/client.gotmpl
index 3d01e9dccf5..85a99637248 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/client/client.gotmpl
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/client/client.gotmpl
@@ -16,6 +16,7 @@ import (
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
@@ -29,6 +30,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi
return &Client{transport: transport, formats: formats}
}
+// New creates a new {{ humanize .Name }} API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// New creates a new {{ humanize .Name }} API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
/*
Client {{ if .Summary }}{{ .Summary }}{{ if .Description }}
@@ -39,9 +65,58 @@ type Client struct {
formats strfmt.Registry
}
-// ClientOption is the option for Client methods
+// ClientOption may be used to customize the behavior of Client methods.
type ClientOption func(*runtime.ClientOperation)
+{{- with .ClientOptions }}{{/* use ad'hoc function mediaGoName rather than pascalize because of patterns with * */}}
+
+// This client is generated with a few options you might find useful for your swagger spec.
+//
+// Feel free to add you own set of options.
+ {{- if gt (len .ConsumesMediaTypes) 1 }}
+
+// WithContentType allows the client to force the Content-Type header
+// to negotiate a specific Consumer from the server.
+//
+// You may use this option to set arbitrary extensions to your MIME media type.
+func WithContentType(mime string) ClientOption {
+ return func(r *runtime.ClientOperation) {
+ r.ConsumesMediaTypes = []string{mime}
+ }
+}
+ {{ range .ConsumesMediaTypes }}
+ {{- if not ( eq (mediaGoName .) "" ) }}{{/* guard: in case garbled input produces a (conflicting) empty name */}}
+
+// WithContentType{{ mediaGoName . }} sets the Content-Type header to {{ printf "%q" . }}.
+func WithContentType{{ mediaGoName . }}(r *runtime.ClientOperation) {
+ r.ConsumesMediaTypes = []string{ {{ printf "%q" . }} }
+}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- if gt (len .ProducesMediaTypes) 1 }}
+
+// WithAccept allows the client to force the Accept header
+// to negotiate a specific Producer from the server.
+//
+// You may use this option to set arbitrary extensions to your MIME media type.
+func WithAccept(mime string) ClientOption {
+ return func(r *runtime.ClientOperation) {
+ r.ProducesMediaTypes = []string{mime}
+ }
+}
+ {{ range .ProducesMediaTypes }}
+ {{- if not ( eq (mediaGoName .) "" ) }}{{/* guard: in case garbled input produces a (conflicting) empty name */}}
+
+// WithAccept{{ mediaGoName . }} sets the Accept header to {{ printf "%q" . }}.
+func WithAccept{{ mediaGoName . }}(r *runtime.ClientOperation) {
+ r.ProducesMediaTypes = []string{ {{ printf "%q" . }} }
+}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+
// ClientService is the interface for Client methods
type ClientService interface {
{{ range .Operations }}
@@ -124,4 +199,4 @@ func (a *Client) {{ pascalize .Name }}(params *{{ pascalize .Name }}Params{{ if
// SetTransport changes the transport on the client
func (a *Client) SetTransport(transport runtime.ClientTransport) {
a.transport = transport
-}
\ No newline at end of file
+}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/client/response.gotmpl b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/client/response.gotmpl
index d622385407c..dce21aa2f9c 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/client/response.gotmpl
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/client/response.gotmpl
@@ -119,11 +119,17 @@ func ({{ .ReceiverName }} *{{ pascalize .Name }}) Code() int {
}
func ({{ .ReceiverName }} *{{ pascalize .Name }}) Error() string {
- return fmt.Sprintf("[{{ upper .Method }} {{ .Path }}][%d] {{ if .Name }}{{ .Name }} {{ else }}unknown error {{ end }}{{ if .Schema }} %+v{{ end }}", {{ if eq .Code -1 }}{{ .ReceiverName }}._statusCode{{ else }}{{ .Code }}{{ end }}{{ if .Schema }}, o.Payload{{ end }})
+ {{- if .Schema }}{{ if (not .Schema.IsStream) }}
+ payload, _ := json.Marshal(o.Payload)
+ {{- end }}{{- end }}
+ return fmt.Sprintf("[{{ upper .Method }} {{ .Path }}][%d]{{ if .Name }} {{ .Name }}{{ else }} unknown error{{ end }}{{ if .Schema }}{{ if not .Schema.IsStream }} %s{{ end }}{{ end }}", {{ if eq .Code -1 }}{{ .ReceiverName }}._statusCode{{ else }}{{ .Code }}{{ end }}{{ if .Schema }}{{ if not .Schema.IsStream }}, payload{{ end }}{{ end }})
}
func ({{ .ReceiverName }} *{{ pascalize .Name }}) String() string {
- return fmt.Sprintf("[{{ upper .Method }} {{ .Path }}][%d] {{ if .Name }}{{ .Name }} {{ else }}unknown response {{ end }}{{ if .Schema }} %+v{{ end }}", {{ if eq .Code -1 }}{{ .ReceiverName }}._statusCode{{ else }}{{ .Code }}{{ end }}{{ if .Schema }}, o.Payload{{ end }})
+ {{- if .Schema }}{{ if (not .Schema.IsStream) }}
+ payload, _ := json.Marshal(o.Payload)
+ {{- end }}{{- end }}
+ return fmt.Sprintf("[{{ upper .Method }} {{ .Path }}][%d]{{ if .Name }} {{ .Name }}{{ else }} unknown response{{ end }}{{ if .Schema }}{{ if not .Schema.IsStream }} %s{{ end }}{{ end }}", {{ if eq .Code -1 }}{{ .ReceiverName }}._statusCode{{ else }}{{ .Code }}{{ end }}{{ if .Schema }}{{ if not .Schema.IsStream }}, payload{{ end }}{{ end }})
}
{{ if .Schema }}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/contrib/stratoscale/README.md b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/contrib/stratoscale/README.md
index 1d36d66f5c5..9ef4fea805f 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/contrib/stratoscale/README.md
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/contrib/stratoscale/README.md
@@ -71,7 +71,7 @@ type PetAPI interface {
PetUpdate(ctx context.Context, params pet.PetUpdateParams) middleware.Responder
}
-//go:generate mockery -name StoreAPI -inpkg
+//go:generate mockery --name StoreAPI --inpackage
// StoreAPI
type StoreAPI interface {
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/contrib/stratoscale/server/configureapi.gotmpl b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/contrib/stratoscale/server/configureapi.gotmpl
index eaee9701f8e..03f8d67451c 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/contrib/stratoscale/server/configureapi.gotmpl
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/contrib/stratoscale/server/configureapi.gotmpl
@@ -29,7 +29,7 @@ type contextKey string
const AuthKey contextKey = "Auth"
{{ range .OperationGroups -}}
-//go:generate mockery -name {{ pascalize .Name}}API -inpkg
+//go:generate mockery --name {{ pascalize .Name}}API --inpackage
/* {{ pascalize .Name }}API {{ .Description }} */
type {{ pascalize .Name }}API interface {
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/docstring.gotmpl b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/docstring.gotmpl
index 8e7108be1a2..a0a9d123ec8 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/docstring.gotmpl
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/docstring.gotmpl
@@ -10,13 +10,13 @@
{{- else }}
{{- humanize .Name }}
{{- end }}
- {{- if or .MinProperties .MinProperties }}
+ {{- if or .MinProperties .MaxProperties }}
//
{{- if .MinProperties }}
-// Min Properties: {{ .MinProperties }}
+// MinProperties: {{ .MinProperties }}
{{- end }}
{{- if .MaxProperties }}
-// Max Properties: {{ .MaxProperties }}
+// MaxProperties: {{ .MaxProperties }}
{{- end }}
{{- end }}
{{- if .Example }}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/markdown/docs.gotmpl b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/markdown/docs.gotmpl
index 8b7c6b3dd16..79461d1d013 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/markdown/docs.gotmpl
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/markdown/docs.gotmpl
@@ -2,18 +2,18 @@
{{- with .ExternalDocs }}
{{- if .URL }}
{{- if .Description }}
-> [{{ trimSpace .Description }}]({{ .URL }})
+> [{{ mdBlock .Description }}]({{ .URL }})
{{- else }}
> [Read more]({{ .URL }})
{{- end }}
{{- else }}
-> {{ trimSpace .Description }}
+> {{ mdBlock .Description }}
{{- end }}
{{- end }}
{{- end }}
{{- define "docParam" }}{{/* renders a parameter with simple schema */}}
-| {{ .Name }} | `{{ .Location }}` | {{ paramDocType . }} | `{{ .GoType }}` | {{ if .CollectionFormat }}`{{ docCollectionFormat .CollectionFormat .Child }}`{{ end }} | {{ if .Required }}✓{{ end }} | {{ if .Default }}`{{ json .Default }}`{{ end }} | {{ trimSpace .Description }} |
+| {{ .Name }} | `{{ .Location }}` | {{ paramDocType . }} | `{{ .GoType }}` | {{ if .CollectionFormat }}`{{ docCollectionFormat .CollectionFormat .Child }}`{{ end }} | {{ if .Required }}✓{{ end }} | {{ if .Default }}`{{ json .Default }}`{{ end }} | {{ mdBlock .Description }} |
{{- end }}
{{- define "docModelSchema" }}{{/* renders a schema */}}
@@ -46,7 +46,7 @@
{{- else if and .IsAliased .IsPrimitive (not .IsSuperAlias) -}}
| Name | Type | Go type | Default | Description | Example |
|------|------|---------| ------- |-------------|---------|
-| {{ .Name }} | {{ schemaDocType . }}| {{ .AliasedType }} | {{ if .Default }}`{{ json .Default }}`{{ end }}| {{ trimSpace .Description }} | {{ if .Example }}`{{ .Example }}`{{ end }} |
+| {{ .Name }} | {{ schemaDocType . }}| {{ .AliasedType }} | {{ if .Default }}`{{ json .Default }}`{{ end }}| {{ mdBlock .Description }} | {{ if .Example }}`{{ .Example }}`{{ end }} |
{{ printf "\n" }}
{{- else if or (and .IsAliased (not (.IsAdditionalProperties))) (and .IsComplexObject (not .Properties) (not .AllOf)) -}}
[{{- dropPackage .GoType }}](#{{ dasherize (dropPackage .GoType) -}})
@@ -71,7 +71,7 @@ any
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
{{- range .Properties }}
-| {{ .Name }} | {{ template "docSchemaSimple" . }}| `{{ .GoType }}` | {{ if .Required }}✓{{ end }} | {{ if .Default }}`{{ json .Default }}`{{ end }}| {{ trimSpace .Description }} | {{ if .Example }}`{{ .Example }}`{{ end }} |
+| {{ .Name }} | {{ template "docSchemaSimple" . }}| `{{ .GoType }}` | {{ if .Required }}✓{{ end }} | {{ if .Default }}`{{ json .Default }}`{{ end }}| {{ mdBlock .Description }} | {{ if .Example }}`{{ .Example }}`{{ end }} |
{{- end }}
{{ printf "\n" }}
{{- end }}
@@ -86,7 +86,7 @@ any
| Type | Go type | Default | Description | Example |
|------|---------| ------- |-------------|---------|
-| {{ template "docSchemaSimple" . }} | `{{ .GoType }}` |{{ if .Default }}`{{ json .Default }}`{{ end }}| {{ trimSpace .Description }} | {{ if .Example }}`{{ .Example }}`{{ end }} |
+| {{ template "docSchemaSimple" . }} | `{{ .GoType }}` |{{ if .Default }}`{{ json .Default }}`{{ end }}| {{ mdBlock .Description }} | {{ if .Example }}`{{ .Example }}`{{ end }} |
{{- else }}
{{ template "docModelSchema" . }}
@@ -104,7 +104,7 @@ any
| Type | Go type | Default | Description | Example |
|------|---------| ------- |-------------|---------|
-| {{ template "docSchemaSimple" . }} | `{{ .GoType }}` |{{ if .Default }}`{{ json .Default }}`{{ end }}| {{ trimSpace .Description }} | {{ if .Example }}`{{ .Example }}`{{ end }} |
+| {{ template "docSchemaSimple" . }} | `{{ .GoType }}` |{{ if .Default }}`{{ json .Default }}`{{ end }}| {{ mdBlock .Description }} | {{ if .Example }}`{{ .Example }}`{{ end }} |
{{- else }}
{{ template "docModelSchema" . }}
@@ -161,7 +161,7 @@ any
{{- end }}
{{- define "docModelBodyParam" }}{{/* layout for body param schema */}}
-| {{ .Name }} | `body` | {{ template "docSchemaSimple" .Schema }} | `{{ .Schema.GoType }}` | | {{ if .Required }}✓{{ end }} | {{ if .Default }}`{{ json .Default }}`{{ end }}| {{ trimSpace .Description }} |
+| {{ .Name }} | `body` | {{ template "docSchemaSimple" .Schema }} | `{{ .Schema.GoType }}` | | {{ if .Required }}✓{{ end }} | {{ if .Default }}`{{ json .Default }}`{{ end }}| {{ mdBlock .Description }} |
{{- end }}
{{- define "docHeaders" }}{{/* renders response headers */}}
@@ -169,7 +169,7 @@ any
| Name | Type | Go type | Separator | Default | Description |
|------|------|---------|-----------|---------|-------------|
{{- range .Headers }}
-| {{ .Name }} | {{ headerDocType . }} | `{{ .GoType }}` | {{ if .CollectionFormat }}`{{ docCollectionFormat .CollectionFormat .Child }}`{{ end }} | {{ if .Default }}`{{ json .Default }}`{{ end }} | {{ trimSpace .Description }} |
+| {{ .Name }} | {{ headerDocType . }} | `{{ .GoType }}` | {{ if .CollectionFormat }}`{{ docCollectionFormat .CollectionFormat .Child }}`{{ end }} | {{ if .Default }}`{{ json .Default }}`{{ end }} | {{ mdBlock .Description }} |
{{- end }}
{{- end }}
{{- end }}
@@ -350,7 +350,7 @@ Name | Description
{{- range .Operations }}
{{- $opname := .Name }}
-### {{ if .Summary }}{{ trimSpace .Summary }}{{ else }}{{ humanize .Name }}{{ end }} (*{{ .Name }}*)
+### {{ if .Summary }}{{ mdBlock .Summary }}{{ else }}{{ humanize .Name }}{{ end }} (*{{ .Name }}*)
```
{{ upper .Method }} {{ joinPath .BasePath .Path }}
@@ -424,16 +424,16 @@ Name | Description
| Code | Status | Description | Has headers | Schema |
|------|--------|-------------|:-----------:|--------|
{{- range .Responses }}
-| [{{.Code}}](#{{ dasherize $opname }}-{{ .Code }}) | {{ httpStatus .Code }} | {{ trimSpace .Description }} | {{ if .Headers }}✓{{ end }} | [schema](#{{ dasherize $opname }}-{{ .Code }}-schema) |
+| [{{.Code}}](#{{ dasherize $opname }}-{{ .Code }}) | {{ httpStatus .Code }} | {{ mdBlock .Description }} | {{ if .Headers }}✓{{ end }} | [schema](#{{ dasherize $opname }}-{{ .Code }}-schema) |
{{- end }}
{{- with .DefaultResponse }}
-| [default](#{{ dasherize $opname }}-default) | | {{ trimSpace .Description }} | {{ if .Headers }}✓{{ end }} | [schema](#{{ dasherize $opname }}-default-schema) |
+| [default](#{{ dasherize $opname }}-default) | | {{ mdBlock .Description }} | {{ if .Headers }}✓{{ end }} | [schema](#{{ dasherize $opname }}-default-schema) |
{{- end }}
#### Responses
{{ range .Responses }}
-##### {{.Code}}{{ if .Description }} - {{ trimSpace .Description }}{{ end }}
+##### {{.Code}}{{ if .Description }} - {{ mdBlock .Description }}{{ end }}
Status: {{ httpStatus .Code }}
###### Schema
@@ -462,7 +462,7 @@ Status: {{ httpStatus .Code }}
{{- with .DefaultResponse }}
##### Default Response
-{{ trimSpace .Description }}
+{{ mdBlock .Description }}
###### Schema
{{- if .Schema }}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/schemavalidator.gotmpl b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/schemavalidator.gotmpl
index 61684acd041..cee8a5dd644 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/schemavalidator.gotmpl
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/schemavalidator.gotmpl
@@ -1,6 +1,6 @@
{{ define "primitivefieldcontextvalidator" }}
{{ if .ReadOnly }}
- if err := validate.ReadOnly(ctx, {{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ if not (or .IsAnonymous .IsNullable) }}{{ .GoType }}({{ end }}{{.ValueExpression }}{{ if not (or .IsAnonymous .IsNullable) }}){{ end }}); err != nil{
+ if err := validate.ReadOnly(ctx, {{ path . }}, {{ printf "%q" .Location }}, {{ if not (or .IsAnonymous .IsNullable) }}{{ .GoType }}({{ end }}{{.ValueExpression }}{{ if not (or .IsAnonymous .IsNullable) }}){{ end }}); err != nil{
return err
}
{{ end }}
@@ -8,25 +8,25 @@
{{ define "primitivefieldvalidator" }}
{{ if .Required }}
{{- if and (eq .GoType "string") (not .IsNullable) }}
- if err := validate.RequiredString({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ if .IsAliased }}{{ .GoType }}({{ end }}{{.ValueExpression }}{{ if .IsAliased }}){{ end }}); err != nil {
+ if err := validate.RequiredString({{ path . }}, {{ printf "%q" .Location }}, {{ if .IsAliased }}{{ .GoType }}({{ end }}{{.ValueExpression }}{{ if .IsAliased }}){{ end }}); err != nil {
{{- else }}
- if err := validate.Required({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ if not (or .IsAnonymous .IsNullable) }}{{ .GoType }}({{ end }}{{.ValueExpression }}{{ if not (or .IsAnonymous .IsNullable) }}){{ end }}); err != nil {
+ if err := validate.Required({{ path . }}, {{ printf "%q" .Location }}, {{ if not (or .IsAnonymous .IsNullable) }}{{ .GoType }}({{ end }}{{.ValueExpression }}{{ if not (or .IsAnonymous .IsNullable) }}){{ end }}); err != nil {
{{- end }}
return err
}
{{- end }}
{{ if .MinLength }}
- if err := validate.MinLength({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ .ToString }}, {{.MinLength }}); err != nil {
+ if err := validate.MinLength({{ path . }}, {{ printf "%q" .Location }}, {{ .ToString }}, {{.MinLength }}); err != nil {
return err
}
{{- end }}
{{ if .MaxLength }}
- if err := validate.MaxLength({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ .ToString }}, {{.MaxLength }}); err != nil {
+ if err := validate.MaxLength({{ path . }}, {{ printf "%q" .Location }}, {{ .ToString }}, {{.MaxLength }}); err != nil {
return err
}
{{ end }}
{{ if .Pattern }}
- if err := validate.Pattern({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ .ToString }}, `{{ escapeBackticks .Pattern }}`); err != nil {
+ if err := validate.Pattern({{ path . }}, {{ printf "%q" .Location }}, {{ .ToString }}, `{{ escapeBackticks .Pattern }}`); err != nil {
return err
}
{{- end }}
@@ -41,7 +41,7 @@
{{ end }}
{{ if .Enum }}
// value enum
- if err := {{.ReceiverName }}.validate{{ pascalize .Name }}{{ .Suffix }}Enum({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ if .IsNullable }}*{{ end }}{{.ValueExpression }}); err != nil {
+ if err := {{.ReceiverName }}.validate{{ pascalize .Name }}{{ .Suffix }}Enum({{ path . }}, {{ printf "%q" .Location }}, {{ if .IsNullable }}*{{ end }}{{.ValueExpression }}); err != nil {
return err
}
{{- end }}
@@ -52,7 +52,7 @@
{{ define "slicecontextvalidator" }}
{{ if .ReadOnly }}
- if err := validate.ReadOnly(ctx, {{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ if not (or .IsAnonymous .IsNullable) }}{{ .GoType }}({{ end }}{{.ValueExpression }}{{ if not (or .IsAnonymous .IsNullable) }}){{ end }}); err != nil{
+ if err := validate.ReadOnly(ctx, {{ path . }}, {{ printf "%q" .Location }}, {{ if not (or .IsAnonymous .IsNullable) }}{{ .GoType }}({{ end }}{{.ValueExpression }}{{ if not (or .IsAnonymous .IsNullable) }}){{ end }}); err != nil{
return err
}
{{ end }}
@@ -71,9 +71,9 @@
{{- end }}
if err := {{.ValueExpression }}.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
- return ve.ValidateName({{ if .Path }}{{ .Path }}{{ else }}""{{ end }})
+ return ve.ValidateName({{ path . }})
} else if ce, ok := err.(*errors.CompositeError); ok {
- return ce.ValidateName({{ if .Path }}{{ .Path }}{{ else }}""{{ end }})
+ return ce.ValidateName({{ path . }})
}
return err
}
@@ -86,7 +86,7 @@
{{define "slicevalidator" }}
{{ if .Required }}
- if err := validate.Required({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ .ValueExpression }}); err != nil {
+ if err := validate.Required({{ path . }}, {{ printf "%q" .Location }}, {{ .ValueExpression }}); err != nil {
return err
}
{{ end }}
@@ -94,23 +94,23 @@
{{ .IndexVar }}{{ pascalize .Name }}Size := int64(len({{.ValueExpression }}))
{{ end }}
{{ if .MinItems }}
- if err := validate.MinItems({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ .IndexVar }}{{ pascalize .Name }}Size, {{.MinItems }}); err != nil {
+ if err := validate.MinItems({{ path . }}, {{ printf "%q" .Location }}, {{ .IndexVar }}{{ pascalize .Name }}Size, {{.MinItems }}); err != nil {
return err
}
{{ end }}
{{ if .MaxItems }}
- if err := validate.MaxItems({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ .IndexVar }}{{ pascalize .Name }}Size, {{.MaxItems }}); err != nil {
+ if err := validate.MaxItems({{ path . }}, {{ printf "%q" .Location }}, {{ .IndexVar }}{{ pascalize .Name }}Size, {{.MaxItems }}); err != nil {
return err
}
{{ end }}
{{ if .UniqueItems }}
- if err := validate.UniqueItems({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{.ValueExpression }}); err != nil {
+ if err := validate.UniqueItems({{ path . }}, {{ printf "%q" .Location }}, {{.ValueExpression }}); err != nil {
return err
}
{{ end }}
{{ if .Enum }}
// for slice
- if err := {{.ReceiverName }}.validate{{ pascalize .Name }}Enum({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{.ValueExpression }}); err != nil {
+ if err := {{.ReceiverName }}.validate{{ pascalize .Name }}Enum({{ path . }}, {{ printf "%q" .Location }}, {{.ValueExpression }}); err != nil {
return err
}
{{ end }}
@@ -138,9 +138,9 @@
{{- end }}
if err := {{.ValueExpression }}.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
- return ve.ValidateName({{ if .Path }}{{ .Path }}{{ else }}""{{ end }})
+ return ve.ValidateName({{ path . }})
} else if ce, ok := err.(*errors.CompositeError); ok {
- return ce.ValidateName({{ if .Path }}{{ .Path }}{{ else }}""{{ end }})
+ return ce.ValidateName({{ path . }})
}
return err
}
@@ -154,10 +154,10 @@
{{- if and .Required }}
{{- if or .IsNullable .IsInterface }}
if {{ .ReceiverName }}.{{ pascalize .Name }} == nil {
- return errors.Required({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, nil)
+ return errors.Required({{ path . }}, {{ printf "%q" .Location }}, nil)
}
{{- else }}
- if err := validate.Required{{ if and (eq .GoType "string") (not .IsNullable) }}String{{ end }}({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ if not (or .IsAnonymous .IsNullable) }}{{ .GoType }}({{ end }}{{ .ValueExpression }}{{ if not (or .IsAnonymous .IsNullable) }}){{ end }}); err != nil {
+ if err := validate.Required{{ if and (eq .GoType "string") (not .IsNullable) }}String{{ end }}({{ path . }}, {{ printf "%q" .Location }}, {{ if not (or .IsAnonymous .IsNullable) }}{{ .GoType }}({{ end }}{{ .ValueExpression }}{{ if not (or .IsAnonymous .IsNullable) }}){{ end }}); err != nil {
return err
}
{{- end }}
@@ -213,7 +213,7 @@
{{ template "mapcontextvalidator" . }}
{{- else if and .IsMap .IsInterface }}
{{ if .Enum }}
- if err := {{ .ReceiverName }}.validate{{ pascalize .Name }}ValueEnum({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ $validatedValues }}[{{ $keyVar }}]); err != nil {
+ if err := {{ .ReceiverName }}.validate{{ pascalize .Name }}ValueEnum({{ path . }}, {{ printf "%q" .Location }}, {{ $validatedValues }}[{{ $keyVar }}]); err != nil {
return err
}
{{- end }}
@@ -278,10 +278,10 @@
{{- if and .Required }}
{{- if or .IsNullable .IsInterface }}
if {{ .ReceiverName }}.{{ pascalize .Name }} == nil {
- return errors.Required({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, nil)
+ return errors.Required({{ path . }}, {{ printf "%q" .Location }}, nil)
}
{{- else }}
- if err := validate.Required{{ if and (eq .GoType "string") (not .IsNullable) }}String{{ end }}({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ if not (or .IsAnonymous .IsNullable) }}{{ .GoType }}({{ end }}{{ .ValueExpression }}{{ if not (or .IsAnonymous .IsNullable) }}){{ end }}); err != nil {
+ if err := validate.Required{{ if and (eq .GoType "string") (not .IsNullable) }}String{{ end }}({{ path . }}, {{ printf "%q" .Location }}, {{ if not (or .IsAnonymous .IsNullable) }}{{ .GoType }}({{ end }}{{ .ValueExpression }}{{ if not (or .IsAnonymous .IsNullable) }}){{ end }}); err != nil {
return err
}
{{- end }}
@@ -295,12 +295,12 @@
{{- if .IsInterface }}
if {{ $validatedValues }}[{{ $keyVar }}] == nil { // not required
{{- else }}
- if swag.IsZero({{ $validatedValues }}[{{ $keyVar }}]) { // not required
+ if swag.IsZero({{ .ValueExpression }}) { // not required
{{- end }}
continue
}
{{- else if and (.Required) (not .IsArray) }}{{/* Required slice is processed below */}}
- if err := validate.Required({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ $validatedValues }}[{{ $keyVar }}]); err != nil {
+ if err := validate.Required({{ path . }}, {{ printf "%q" .Location }}, {{ $validatedValues }}[{{ $keyVar }}]); err != nil {
return err
}
{{- end }}
@@ -313,9 +313,9 @@
{{- end }}
if err := val.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
- return ve.ValidateName({{ if .Path }}{{ .Path }}{{ else }}""{{ end }})
+ return ve.ValidateName({{ path . }})
} else if ce, ok := err.(*errors.CompositeError); ok {
- return ce.ValidateName({{ if .Path }}{{ .Path }}{{ else }}""{{ end }})
+ return ce.ValidateName({{ path . }})
}
return err
}
@@ -342,7 +342,7 @@
{{- end }}
{{- else if and .IsCustomFormatter (or .HasValidations .Required) }}{{/* custom format not captured as primitive */}}
{{- if .Required }}
- if err := validate.Required{{ if and (eq .GoType "string") (not .IsNullable) }}String{{ end }}({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ if not (or .IsAnonymous .IsNullable) }}{{ .GoType }}({{ end }}{{.ValueExpression }}{{ if not (or .IsAnonymous .IsNullable) }}){{ end }}); err != nil {
+ if err := validate.Required{{ if and (eq .GoType "string") (not .IsNullable) }}String{{ end }}({{ path . }}, {{ printf "%q" .Location }}, {{ if not (or .IsAnonymous .IsNullable) }}{{ .GoType }}({{ end }}{{.ValueExpression }}{{ if not (or .IsAnonymous .IsNullable) }}){{ end }}); err != nil {
return err
}
{{- end }}
@@ -352,15 +352,16 @@
{{- else if .IsArray }}
{{ template "slicevalidator" . }}
{{- else if and .IsMap (not .IsInterface) }}
+ {{ template "minmaxProperties" .}}
{{ template "mapvalidator" . }}
{{ if .Enum }}
- if err := {{ .ReceiverName }}.validate{{ pascalize .Name }}ValueEnum({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ $validatedValues }}[{{ $keyVar }}]); err != nil {
+ if err := {{ .ReceiverName }}.validate{{ pascalize .Name }}ValueEnum({{ path . }}, {{ printf "%q" .Location }}, {{ $validatedValues }}[{{ $keyVar }}]); err != nil {
return err
}
{{- end }}
{{- else if and .IsMap .IsInterface }}
{{ if .Enum }}
- if err := {{ .ReceiverName }}.validate{{ pascalize .Name }}ValueEnum({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ $validatedValues }}[{{ $keyVar }}]); err != nil {
+ if err := {{ .ReceiverName }}.validate{{ pascalize .Name }}ValueEnum({{ path . }}, {{ printf "%q" .Location }}, {{ $validatedValues }}[{{ $keyVar }}]); err != nil {
return err
}
{{- end }}
@@ -372,9 +373,9 @@
{{- end }}
if err := val.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
- return ve.ValidateName({{ if .Path }}{{ .Path }}{{ else }}""{{ end }})
+ return ve.ValidateName({{ path . }})
} else if ce, ok := err.(*errors.CompositeError); ok {
- return ce.ValidateName({{ if .Path }}{{ .Path }}{{ else }}""{{ end }})
+ return ce.ValidateName({{ path . }})
}
return err
}
@@ -402,7 +403,7 @@
{{ end }}
{{ if .Enum }}
// from map
- if err := {{ .ReceiverName }}.validate{{ pascalize .Name }}Enum({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ .ValueExpression }}); err != nil {
+ if err := {{ .ReceiverName }}.validate{{ pascalize .Name }}Enum({{ path . }}, {{ printf "%q" .Location }}, {{ .ValueExpression }}); err != nil {
return err
}
{{ end }}
@@ -430,6 +431,11 @@
{{- end }}
{{- end }}
{{- end }}
+ {{- else if .Enum }}
+ // from map without additionalProperties
+ if err := {{ .ReceiverName }}.validate{{ pascalize .Name }}Enum({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ .ValueExpression }}); err != nil {
+ return err
+ }
{{- end }}
{{ end }}
@@ -462,9 +468,9 @@
{{ end }}
if err := {{.ValueExpression }}.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
- return ve.ValidateName({{ if .Path }}{{ .Path }}{{ else }}""{{ end }})
+ return ve.ValidateName({{ path . }})
} else if ce, ok := err.(*errors.CompositeError); ok {
- return ce.ValidateName({{ if .Path }}{{ .Path }}{{ else }}""{{ end }})
+ return ce.ValidateName({{ path . }})
}
return err
}
@@ -490,14 +496,14 @@
{{ define "minmaxProperties" }}
{{- if and (or .IsMap (and .IsAdditionalProperties .HasAdditionalProperties)) (or .MinProperties .MaxProperties) }}
{{- if and (not .IsAdditionalProperties) (not .IsInterface) (eq (len .Properties) 0) }}{{/* map only */}}
- nprops := len({{ if and (not .IsAliased) .HasAdditionalProperties }}{{ .ReceiverName }}{{ else }}{{ .ValueExpression }}{{ end }})
+ nprops := len({{ if and .IsMap (not .IsAliased) .HasAdditionalProperties (not .IsElem) (not .IsProperty) }}{{ .ReceiverName }}{{ else }}{{ .ValueExpression }}{{ end }})
{{- else }}{{/* object with properties */}}
{{- if and .IsNullable .MinProperties }}
{{- if gt0 .MinProperties }}
// short circuits minProperties > 0
if {{ .ReceiverName }} == nil {
- return errors.TooFewProperties({{ if .Path }}{{ .Path }}{{else}}""{{end}}, {{ printf "%q" .Location }}, {{ .MinProperties }})
+ return errors.TooFewProperties({{ path . }}, {{ printf "%q" .Location }}, {{ .MinProperties }})
}
{{- end }}
{{- end }}
@@ -517,13 +523,13 @@
{{ if .MinProperties }}
// minProperties: {{ .MinProperties }}
if nprops < {{ .MinProperties }} {
- return errors.TooFewProperties({{ if .Path }}{{ .Path }}{{else}}""{{end}}, {{ printf "%q" .Location }}, {{ .MinProperties }})
+ return errors.TooFewProperties({{ path . }}, {{ printf "%q" .Location }}, {{ .MinProperties }})
}
{{- end }}
{{ if .MaxProperties }}
// maxProperties: {{ .MaxProperties }}
if nprops > {{ .MaxProperties }} {
- return errors.TooManyProperties({{ if .Path }}{{ .Path }}{{else}}""{{end}}, {{ printf "%q" .Location }}, {{ .MaxProperties }})
+ return errors.TooManyProperties({{ path . }}, {{ printf "%q" .Location }}, {{ .MaxProperties }})
}
{{- end }}
{{- end }}
@@ -548,7 +554,7 @@
*/}}
{{- if not .IsAnonymous }}
{{- if and .Required (or .IsNullable .IsBaseType .IsMap) }}
- if err := validate.Required({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{.ValueExpression }}); err != nil {
+ if err := validate.Required({{ path . }}, {{ printf "%q" .Location }}, {{.ValueExpression }}); err != nil {
return err
}
{{- if and (not .Required) .IsBaseType }}
@@ -563,9 +569,9 @@
{{- end }}
if err := {{.ValueExpression }}.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
- return ve.ValidateName({{ if .Path }}{{ .Path }}{{ else }}""{{ end }})
+ return ve.ValidateName({{ path . }})
} else if ce, ok := err.(*errors.CompositeError); ok {
- return ce.ValidateName({{ if .Path }}{{ .Path }}{{ else }}""{{ end }})
+ return ce.ValidateName({{ path . }})
}
return err
}
@@ -602,7 +608,7 @@
// at https://github.com/go-swagger/go-swagger/issues
{{- if .ReadOnly }}
- if err := validate.ReadOnly{{ if and (eq .GoType "string") (not .IsNullable) }}String{{ end }}({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ if not (or .IsAnonymous .IsNullable) }}{{ .GoType }}({{ end }}{{.ValueExpression }}{{ if not (or .IsAnonymous .IsNullable) }}){{ end }}); err != nil {
+ if err := validate.ReadOnly{{ if and (eq .GoType "string") (not .IsNullable) }}String{{ end }}({{ path . }}, {{ printf "%q" .Location }}, {{ if not (or .IsAnonymous .IsNullable) }}{{ .GoType }}({{ end }}{{.ValueExpression }}{{ if not (or .IsAnonymous .IsNullable) }}){{ end }}); err != nil {
return err
}
{{- end }}
@@ -625,7 +631,7 @@
{{- if .IsPrimitive }}
{{- if .IsAliased }}
{{- if and .Required (not .IsAnonymous) }}
- if err := validate.Required{{ if and (eq .GoType "string") (not .IsNullable) }}String{{ end }}({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ if not (or .IsAnonymous .IsNullable) }}{{ .GoType }}({{ end }}{{.ValueExpression }}{{ if not (or .IsAnonymous .IsNullable) }}){{ end }}); err != nil {
+ if err := validate.Required{{ if and (eq .GoType "string") (not .IsNullable) }}String{{ end }}({{ path . }}, {{ printf "%q" .Location }}, {{ if not (or .IsAnonymous .IsNullable) }}{{ .GoType }}({{ end }}{{.ValueExpression }}{{ if not (or .IsAnonymous .IsNullable) }}){{ end }}); err != nil {
return err
}
{{- end }}
@@ -635,7 +641,7 @@
{{- end }}
{{- else if and .IsCustomFormatter (or .HasValidations .Required) }}{{/* custom format not captured as primitive */}}
{{- if .Required }}
- if err := validate.Required{{ if and (eq .GoType "string") (not .IsNullable) }}String{{ end }}({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ if not (or .IsAnonymous .IsNullable) }}{{ .GoType }}({{ end }}{{.ValueExpression }}{{ if not (or .IsAnonymous .IsNullable) }}){{ end }}); err != nil {
+ if err := validate.Required{{ if and (eq .GoType "string") (not .IsNullable) }}String{{ end }}({{ path . }}, {{ printf "%q" .Location }}, {{ if not (or .IsAnonymous .IsNullable) }}{{ .GoType }}({{ end }}{{.ValueExpression }}{{ if not (or .IsAnonymous .IsNullable) }}){{ end }}); err != nil {
return err
}
{{- end }}
@@ -651,10 +657,10 @@
{{- if and .IsAdditionalProperties .Required (not .IsAliased) }}
{{- if or .IsNullable .IsInterface }}
if {{ .ValueExpression }} == nil {
- return errors.Required({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ .ValueExpression }})
+ return errors.Required({{ path . }}, {{ printf "%q" .Location }}, {{ .ValueExpression }})
}
{{- else }}
- if err := validate.Required{{ if and (eq .GoType "string") (not .IsNullable) }}String{{ end }}({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ if not (or .IsAnonymous .IsNullable) }}{{ .GoType }}({{ end }}{{.ValueExpression }}{{ if not (or .IsAnonymous .IsNullable) }}){{ end }}); err != nil {
+ if err := validate.Required{{ if and (eq .GoType "string") (not .IsNullable) }}String{{ end }}({{ path . }}, {{ printf "%q" .Location }}, {{ if not (or .IsAnonymous .IsNullable) }}{{ .GoType }}({{ end }}{{.ValueExpression }}{{ if not (or .IsAnonymous .IsNullable) }}){{ end }}); err != nil {
return err
}
{{- end }}
@@ -663,10 +669,10 @@
{{- else if and .IsExternal .Required }}
{{- if or .IsNullable .IsInterface }}
if {{ .ValueExpression }} == nil {
- return errors.Required({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ .ValueExpression }})
+ return errors.Required({{ path . }}, {{ printf "%q" .Location }}, {{ .ValueExpression }})
}
{{- else }}
- if err := validate.Required{{ if and (eq .GoType "string") (not .IsNullable) }}String{{ end }}({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ if not (or .IsAnonymous .IsNullable) }}{{ .GoType }}({{ end }}{{.ValueExpression }}{{ if not (or .IsAnonymous .IsNullable) }}){{ end }}); err != nil {
+ if err := validate.Required{{ if and (eq .GoType "string") (not .IsNullable) }}String{{ end }}({{ path . }}, {{ printf "%q" .Location }}, {{ if not (or .IsAnonymous .IsNullable) }}{{ .GoType }}({{ end }}{{.ValueExpression }}{{ if not (or .IsAnonymous .IsNullable) }}){{ end }}); err != nil {
return err
}
{{- end }}
@@ -697,7 +703,7 @@
{{ template "primitivefieldvalidator" . }}
{{- else if and .IsCustomFormatter (or .HasValidations .Required) }}{{/* custom format not captured as primitive */}}
{{- if .Required }}
- if err := validate.Required{{ if and (eq .GoType "string") (not .IsNullable) }}String{{ end }}({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ if not (or .IsAnonymous .IsNullable) }}{{ .GoType }}({{ end }}{{.ValueExpression }}{{ if not (or .IsAnonymous .IsNullable) }}){{ end }}); err != nil {
+ if err := validate.Required{{ if and (eq .GoType "string") (not .IsNullable) }}String{{ end }}({{ path . }}, {{ printf "%q" .Location }}, {{ if not (or .IsAnonymous .IsNullable) }}{{ .GoType }}({{ end }}{{.ValueExpression }}{{ if not (or .IsAnonymous .IsNullable) }}){{ end }}); err != nil {
return err
}
{{- end }}
@@ -1034,11 +1040,11 @@ func ({{.ReceiverName }} *{{ if $.Discriminates }}{{ camelize $.Name }}{{ else i
{{- if and $.IsTuple .IsMap .Required }}
{{- if .IsInterface }}
if {{ .ValueExpression }} == nil {
- return errors.Required({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ .ValueExpression }})
+ return errors.Required({{ path . }}, {{ printf "%q" .Location }}, {{ .ValueExpression }})
}
{{- else }}
if err := validate.Required{{ if and (eq .GoType "string") (not .IsNullable) }}String{{ end }}(
- {{- if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }},
+ {{ path . }}, {{ printf "%q" .Location }},
{{- if and (eq .GoType "string") (not (or .IsAnonymous .IsNullable)) }}{{ .GoType }}({{ end }}
{{- .ValueExpression }}
{{- if and (eq .GoType "string") (not (or .IsAnonymous .IsNullable)) }}){{ end }}); err != nil {
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/server/server.gotmpl b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/server/server.gotmpl
index c78d22051a7..ccbc520b0a0 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/server/server.gotmpl
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/server/server.gotmpl
@@ -230,7 +230,7 @@ type Server struct {
ListenLimit int{{ if .UseGoStructFlags }} `long:"listen-limit" description:"limit the number of outstanding requests"`{{ end }}
KeepAlive time.Duration{{ if .UseGoStructFlags }} `long:"keep-alive" description:"sets the TCP keep-alive timeouts on accepted connections. It prunes dead TCP connections ( e.g. closing laptop mid-download)" default:"3m"`{{ end }}
ReadTimeout time.Duration{{ if .UseGoStructFlags }} `long:"read-timeout" description:"maximum duration before timing out read of the request" default:"30s"`{{ end }}
- WriteTimeout time.Duration{{ if .UseGoStructFlags }} `long:"write-timeout" description:"maximum duration before timing out write of the response" default:"60s"`{{ end }}
+ WriteTimeout time.Duration{{ if .UseGoStructFlags }} `long:"write-timeout" description:"maximum duration before timing out write of the response" default:"30s"`{{ end }}
httpServerL net.Listener
TLSHost string{{ if .UseGoStructFlags }} `long:"tls-host" description:"the IP to listen on for tls, when not specified it's the same as --host" env:"TLS_HOST"`{{ end }}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/validation/customformat.gotmpl b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/validation/customformat.gotmpl
index 354075a9081..a69e2d7a7e1 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/validation/customformat.gotmpl
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/validation/customformat.gotmpl
@@ -1,3 +1,3 @@
-if err := validate.FormatOf({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ printf "%q" .SwaggerFormat }}, {{ .ToString }}, formats); err != nil {
+if err := validate.FormatOf({{ path . }}, {{ printf "%q" .Location }}, {{ printf "%q" .SwaggerFormat }}, {{ .ToString }}, formats); err != nil {
return err
}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/validation/maximum.gotmpl b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/validation/maximum.gotmpl
index 993f7344f56..fb8a2b3debe 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/validation/maximum.gotmpl
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/validation/maximum.gotmpl
@@ -1,21 +1,21 @@
{{- if or (hasPrefix .UnderlyingType "int") }}
{{- if and (hasPrefix .UnderlyingType "int64") (not .IsAliased) }}
-if err := validate.MaximumInt({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ if .IsNullable }}*{{ end }}{{.ValueExpression }}, {{.Maximum }}, {{.ExclusiveMaximum }}); err != nil {
+if err := validate.MaximumInt({{ path . }}, {{ printf "%q" .Location }}, {{ if .IsNullable }}*{{ end }}{{.ValueExpression }}, {{.Maximum }}, {{.ExclusiveMaximum }}); err != nil {
{{- else }}
-if err := validate.MaximumInt({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, int64({{ if .IsNullable }}*{{ end }}{{.ValueExpression }}), {{.Maximum }}, {{.ExclusiveMaximum }}); err != nil {
+if err := validate.MaximumInt({{ path . }}, {{ printf "%q" .Location }}, int64({{ if .IsNullable }}*{{ end }}{{.ValueExpression }}), {{.Maximum }}, {{.ExclusiveMaximum }}); err != nil {
{{- end }}
{{- else }}
{{- if hasPrefix .UnderlyingType "uint" }}
{{- if and (hasPrefix .UnderlyingType "uint64") (not .IsAliased) }}
-if err := validate.MaximumUint({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ if .IsNullable }}*{{ end }}{{.ValueExpression }}, {{.Maximum }}, {{.ExclusiveMaximum }}); err != nil {
+if err := validate.MaximumUint({{ path . }}, {{ printf "%q" .Location }}, {{ if .IsNullable }}*{{ end }}{{.ValueExpression }}, {{.Maximum }}, {{.ExclusiveMaximum }}); err != nil {
{{- else }}
-if err := validate.MaximumUint({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, uint64({{ if .IsNullable }}*{{ end }}{{.ValueExpression }}), {{.Maximum }}, {{.ExclusiveMaximum }}); err != nil {
+if err := validate.MaximumUint({{ path . }}, {{ printf "%q" .Location }}, uint64({{ if .IsNullable }}*{{ end }}{{.ValueExpression }}), {{.Maximum }}, {{.ExclusiveMaximum }}); err != nil {
{{- end }}
{{- else }}
{{- if and (eq .UnderlyingType "float64") (not .IsAliased) }}
-if err := validate.Maximum({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ if .IsNullable }}*{{ end }}{{.ValueExpression }}, {{.Maximum }}, {{.ExclusiveMaximum }}); err != nil {
+if err := validate.Maximum({{ path . }}, {{ printf "%q" .Location }}, {{ if .IsNullable }}*{{ end }}{{.ValueExpression }}, {{.Maximum }}, {{.ExclusiveMaximum }}); err != nil {
{{- else }}
-if err := validate.Maximum({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, float64({{ if .IsNullable }}*{{ end }}{{.ValueExpression }}), {{.Maximum }}, {{.ExclusiveMaximum }}); err != nil {
+if err := validate.Maximum({{ path . }}, {{ printf "%q" .Location }}, float64({{ if .IsNullable }}*{{ end }}{{.ValueExpression }}), {{.Maximum }}, {{.ExclusiveMaximum }}); err != nil {
{{- end }}
{{- end }}
{{- end }}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/validation/minimum.gotmpl b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/validation/minimum.gotmpl
index 626c207cb09..fda5ba4e041 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/validation/minimum.gotmpl
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/validation/minimum.gotmpl
@@ -1,21 +1,21 @@
{{- if hasPrefix .UnderlyingType "int" }}
{{- if and (hasPrefix .UnderlyingType "int64") (not .IsAliased) }}
-if err := validate.MinimumInt({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ if .IsNullable }}*{{ end }}{{.ValueExpression }}, {{.Minimum }}, {{.ExclusiveMinimum }}); err != nil {
+if err := validate.MinimumInt({{ path . }}, {{ printf "%q" .Location }}, {{ if .IsNullable }}*{{ end }}{{.ValueExpression }}, {{.Minimum }}, {{.ExclusiveMinimum }}); err != nil {
{{- else }}
-if err := validate.MinimumInt({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, int64({{ if .IsNullable }}*{{ end }}{{.ValueExpression }}), {{.Minimum }}, {{.ExclusiveMinimum }}); err != nil {
+if err := validate.MinimumInt({{ path . }}, {{ printf "%q" .Location }}, int64({{ if .IsNullable }}*{{ end }}{{.ValueExpression }}), {{.Minimum }}, {{.ExclusiveMinimum }}); err != nil {
{{- end }}
{{- else }}
{{- if hasPrefix .UnderlyingType "uint" }}
{{- if and (hasPrefix .UnderlyingType "uint64") (not .IsAliased) }}
-if err := validate.MinimumUint({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ if .IsNullable }}*{{ end }}{{.ValueExpression }}, {{.Minimum }}, {{.ExclusiveMinimum }}); err != nil {
+if err := validate.MinimumUint({{ path . }}, {{ printf "%q" .Location }}, {{ if .IsNullable }}*{{ end }}{{.ValueExpression }}, {{.Minimum }}, {{.ExclusiveMinimum }}); err != nil {
{{- else }}
-if err := validate.MinimumUint({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, uint64({{ if .IsNullable }}*{{ end }}{{.ValueExpression }}), {{.Minimum }}, {{.ExclusiveMinimum }}); err != nil {
+if err := validate.MinimumUint({{ path . }}, {{ printf "%q" .Location }}, uint64({{ if .IsNullable }}*{{ end }}{{.ValueExpression }}), {{.Minimum }}, {{.ExclusiveMinimum }}); err != nil {
{{- end }}
{{- else }}
{{- if and (eq .UnderlyingType "float64") (not .IsAliased) }}
-if err := validate.Minimum({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ if .IsNullable }}*{{ end }}{{.ValueExpression }}, {{.Minimum }}, {{.ExclusiveMinimum }}); err != nil {
+if err := validate.Minimum({{ path . }}, {{ printf "%q" .Location }}, {{ if .IsNullable }}*{{ end }}{{.ValueExpression }}, {{.Minimum }}, {{.ExclusiveMinimum }}); err != nil {
{{- else }}
-if err := validate.Minimum({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, float64({{ if .IsNullable }}*{{ end }}{{.ValueExpression }}), {{.Minimum }}, {{.ExclusiveMinimum }}); err != nil {
+if err := validate.Minimum({{ path . }}, {{ printf "%q" .Location }}, float64({{ if .IsNullable }}*{{ end }}{{.ValueExpression }}), {{.Minimum }}, {{.ExclusiveMinimum }}); err != nil {
{{- end }}
{{- end }}
{{- end }}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/validation/multipleOf.gotmpl b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/validation/multipleOf.gotmpl
index 28796852dd6..c48e75e3903 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/validation/multipleOf.gotmpl
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/validation/multipleOf.gotmpl
@@ -1,21 +1,21 @@
{{- if and (hasPrefix .UnderlyingType "int") (isInteger .MultipleOf) }}{{/* if the type is an integer, but the multiple factor is not, fall back to the float64 version of the validator */}}
{{- if and (hasPrefix .UnderlyingType "int64") (not .IsAliased) }}
-if err := validate.MultipleOfInt({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ if .IsNullable }}*{{ end }}{{.ValueExpression }}, {{.MultipleOf }}); err != nil {
+if err := validate.MultipleOfInt({{ path . }}, {{ printf "%q" .Location }}, {{ if .IsNullable }}*{{ end }}{{.ValueExpression }}, {{.MultipleOf }}); err != nil {
{{- else }}
-if err := validate.MultipleOfInt({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, int64({{ if .IsNullable }}*{{ end }}{{.ValueExpression }}), {{.MultipleOf }}); err != nil {
+if err := validate.MultipleOfInt({{ path . }}, {{ printf "%q" .Location }}, int64({{ if .IsNullable }}*{{ end }}{{.ValueExpression }}), {{.MultipleOf }}); err != nil {
{{- end }}
{{- else }}
{{- if and (hasPrefix .UnderlyingType "uint") (isInteger .MultipleOf) }}
{{- if and (hasPrefix .UnderlyingType "uint64") (not .IsAliased) }}
-if err := validate.MultipleOfUint({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ if .IsNullable }}*{{ end }}{{.ValueExpression }}, {{.MultipleOf }}); err != nil {
+if err := validate.MultipleOfUint({{ path . }}, {{ printf "%q" .Location }}, {{ if .IsNullable }}*{{ end }}{{.ValueExpression }}, {{.MultipleOf }}); err != nil {
{{- else }}
-if err := validate.MultipleOfUint({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, uint64({{ if .IsNullable }}*{{ end }}{{.ValueExpression }}), {{.MultipleOf }}); err != nil {
+if err := validate.MultipleOfUint({{ path . }}, {{ printf "%q" .Location }}, uint64({{ if .IsNullable }}*{{ end }}{{.ValueExpression }}), {{.MultipleOf }}); err != nil {
{{- end }}
{{- else }}
{{- if and (eq .UnderlyingType "float64") (not .IsAliased) }}
-if err := validate.MultipleOf({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, {{ if .IsNullable }}*{{ end }}{{.ValueExpression }}, {{.MultipleOf }}); err != nil {
+if err := validate.MultipleOf({{ path . }}, {{ printf "%q" .Location }}, {{ if .IsNullable }}*{{ end }}{{.ValueExpression }}, {{.MultipleOf }}); err != nil {
{{- else }}
-if err := validate.MultipleOf({{ if .Path }}{{ .Path }}{{ else }}""{{ end }}, {{ printf "%q" .Location }}, float64({{ if .IsNullable }}*{{ end }}{{.ValueExpression }}), {{.MultipleOf }}); err != nil {
+if err := validate.MultipleOf({{ path . }}, {{ printf "%q" .Location }}, float64({{ if .IsNullable }}*{{ end }}{{.ValueExpression }}), {{.MultipleOf }}); err != nil {
{{- end }}
{{- end }}
{{- end }}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/validation/primitive.gotmpl b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/validation/primitive.gotmpl
index 35238d78420..8dbf7964340 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/validation/primitive.gotmpl
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/validation/primitive.gotmpl
@@ -1,15 +1,15 @@
{{if .MinLength}}
-if err := validate.MinLength({{ if .Path }}{{ .Path }}{{else}}""{{end}}, {{ printf "%q" .Location }}, {{ .ToString }}, {{.MinLength}}); err != nil {
+if err := validate.MinLength({{ path . }}, {{ printf "%q" .Location }}, {{ .ToString }}, {{.MinLength}}); err != nil {
return err
}
{{end}}
{{if .MaxLength}}
-if err := validate.MaxLength({{ if .Path }}{{ .Path }}{{else}}""{{end}}, {{ printf "%q" .Location }}, {{ .ToString }}, {{.MaxLength}}); err != nil {
+if err := validate.MaxLength({{ path . }}, {{ printf "%q" .Location }}, {{ .ToString }}, {{.MaxLength}}); err != nil {
return err
}
{{end}}
{{if .Pattern}}
-if err := validate.Pattern({{ if .Path }}{{ .Path }}{{else}}""{{end}}, {{ printf "%q" .Location }}, {{ .ToString }}, `{{escapeBackticks .Pattern}}`); err != nil {
+if err := validate.Pattern({{ path . }}, {{ printf "%q" .Location }}, {{ .ToString }}, `{{escapeBackticks .Pattern}}`); err != nil {
return err
}
{{end}}
@@ -23,7 +23,7 @@ if err := validate.Pattern({{ if .Path }}{{ .Path }}{{else}}""{{end}}, {{ printf
{{ template "validationMultipleOf" . }}
{{end}}
{{if .Enum}}
-if err := validate.EnumCase({{ if .Path }}{{ .Path }}{{else}}""{{end}}, {{ printf "%q" .Location }}, {{ if and (not .IsArray) (not .HasDiscriminator) (not .IsInterface) .IsNullable }}*{{ end }}{{.ValueExpression}}{{ if .IsCustomFormatter }}.String(){{ end }}, {{ printf "%#v" .Enum}}, {{ if .IsEnumCI }}false{{ else }}true{{ end }}); err != nil {
+if err := validate.EnumCase({{ path . }}, {{ printf "%q" .Location }}, {{ if and (not .IsArray) (not .HasDiscriminator) (not .IsInterface) .IsNullable }}*{{ end }}{{.ValueExpression}}{{ if .IsCustomFormatter }}.String(){{ end }}, {{ printf "%#v" .Enum}}, {{ if .IsEnumCI }}false{{ else }}true{{ end }}); err != nil {
return err
}
{{end}}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/validation/structfield.gotmpl b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/validation/structfield.gotmpl
index 8378c461515..26fd47d2fa3 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/validation/structfield.gotmpl
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/templates/validation/structfield.gotmpl
@@ -40,14 +40,6 @@
// Min Items: {{ .MinItems }}
{{- end }}
-{{- if .MinProperties }}
-// Min Properties: {{ .MinProperties }}
-{{- end }}
-
-{{- if .MaxProperties }}
-// Max Properties: {{ .MaxProperties }}
-{{- end }}
-
{{- if .UniqueItems }}
// Unique: true
{{- end }}
@@ -57,6 +49,6 @@
{{- end }}
{{- if .Enum }}
-// Enum: {{ printf "%v" .Enum }}
+// Enum: {{ json .Enum }}
{{- end }}
{{- end}}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/types.go b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/types.go
index d2a6a4f5e5a..59057ca8c23 100644
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/generator/types.go
+++ b/test/tools/vendor/github.com/go-swagger/go-swagger/generator/types.go
@@ -24,8 +24,8 @@ import (
"github.com/go-openapi/loads"
"github.com/go-openapi/spec"
"github.com/go-openapi/swag"
+ "github.com/go-viper/mapstructure/v2"
"github.com/kr/pretty"
- "github.com/mitchellh/mapstructure"
)
const (
@@ -345,7 +345,6 @@ func (t *typeResolver) inferAliasing(result *resolvedType, _ *spec.Schema, isAno
}
func (t *typeResolver) resolveFormat(schema *spec.Schema, isAnonymous bool, isRequired bool) (returns bool, result resolvedType, err error) {
-
if schema.Format != "" {
// defaults to string
result.SwaggerType = str
@@ -401,7 +400,6 @@ func (t *typeResolver) resolveFormat(schema *spec.Schema, isAnonymous bool, isRe
//
// The interpretation of Required as a mean to make a type nullable is carried out elsewhere.
func (t *typeResolver) isNullable(schema *spec.Schema) bool {
-
if nullable, ok := t.isNullableOverride(schema); ok {
return nullable
}
@@ -1000,8 +998,8 @@ func warnSkipValidation(types interface{}) func(string, interface{}) {
func guardValidations(tpe string, schema interface {
Validations() spec.SchemaValidations
SetValidations(spec.SchemaValidations)
-}, types ...string) {
-
+}, types ...string,
+) {
v := schema.Validations()
if len(types) == 0 {
types = []string{tpe}
@@ -1049,7 +1047,8 @@ func guardValidations(tpe string, schema interface {
func guardFormatConflicts(format string, schema interface {
Validations() spec.SchemaValidations
SetValidations(spec.SchemaValidations)
-}) {
+},
+) {
v := schema.Validations()
msg := fmt.Sprintf("for format %q", format)
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/README.md b/test/tools/vendor/github.com/go-swagger/go-swagger/scan/README.md
deleted file mode 100644
index 1ae6f766f72..00000000000
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# scan
-
-Pre go1.11 version of the go source parser, without support for go modules.
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/classifier.go b/test/tools/vendor/github.com/go-swagger/go-swagger/scan/classifier.go
deleted file mode 100644
index e674272d074..00000000000
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/classifier.go
+++ /dev/null
@@ -1,166 +0,0 @@
-//go:build !go1.11
-// +build !go1.11
-
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package scan
-
-import (
- "fmt"
- "go/ast"
- "log"
- "regexp"
-
- "golang.org/x/tools/go/loader"
-)
-
-type packageFilter struct {
- Name string
-}
-
-func (pf *packageFilter) Matches(path string) bool {
- matched, err := regexp.MatchString(pf.Name, path)
- if err != nil {
- log.Fatal(err)
- }
- return matched
-}
-
-type packageFilters []packageFilter
-
-func (pf packageFilters) HasFilters() bool {
- return len(pf) > 0
-}
-
-func (pf packageFilters) Matches(path string) bool {
- for _, mod := range pf {
- if mod.Matches(path) {
- return true
- }
- }
- return false
-}
-
-type classifiedProgram struct {
- Meta []*ast.File
- Models []*ast.File
- Routes []*ast.File
- Operations []*ast.File
- Parameters []*ast.File
- Responses []*ast.File
-}
-
-// programClassifier classifies the files of a program into buckets
-// for processing by a swagger spec generator. This buckets files in
-// 3 groups: Meta, Models and Operations.
-//
-// # Each of these buckets is then processed with an appropriate parsing strategy
-//
-// When there are Include or Exclude filters provide they are used to limit the
-// candidates prior to parsing.
-// The include filters take precedence over the excludes. So when something appears
-// in both filters it will be included.
-type programClassifier struct {
- Includes packageFilters
- Excludes packageFilters
-}
-
-func (pc *programClassifier) Classify(prog *loader.Program) (*classifiedProgram, error) {
- var cp classifiedProgram
- for pkg, pkgInfo := range prog.AllPackages {
- if Debug {
- log.Printf("analyzing: %s\n", pkg.Path())
- }
- if pc.Includes.HasFilters() {
- if !pc.Includes.Matches(pkg.Path()) {
- continue
- }
- } else if pc.Excludes.HasFilters() {
- if pc.Excludes.Matches(pkg.Path()) {
- continue
- }
- }
-
- for _, file := range pkgInfo.Files {
- var ro, op, mt, pm, rs, mm bool // only add a particular file once
- for _, comments := range file.Comments {
- var seenStruct string
- for _, cline := range comments.List {
- if cline != nil {
- matches := rxSwaggerAnnotation.FindStringSubmatch(cline.Text)
- if len(matches) > 1 {
- switch matches[1] {
- case "route":
- if !ro {
- cp.Routes = append(cp.Routes, file)
- ro = true
- }
- case "operation":
- if !op {
- cp.Operations = append(cp.Operations, file)
- op = true
- }
- case "model":
- if !mm {
- cp.Models = append(cp.Models, file)
- mm = true
- }
- if seenStruct == "" || seenStruct == matches[1] {
- seenStruct = matches[1]
- } else {
- return nil, fmt.Errorf("classifier: already annotated as %s, can't also be %q - %s", seenStruct, matches[1], cline.Text)
- }
- case "meta":
- if !mt {
- cp.Meta = append(cp.Meta, file)
- mt = true
- }
- case "parameters":
- if !pm {
- cp.Parameters = append(cp.Parameters, file)
- pm = true
- }
- if seenStruct == "" || seenStruct == matches[1] {
- seenStruct = matches[1]
- } else {
- return nil, fmt.Errorf("classifier: already annotated as %s, can't also be %q - %s", seenStruct, matches[1], cline.Text)
- }
- case "response":
- if !rs {
- cp.Responses = append(cp.Responses, file)
- rs = true
- }
- if seenStruct == "" || seenStruct == matches[1] {
- seenStruct = matches[1]
- } else {
- return nil, fmt.Errorf("classifier: already annotated as %s, can't also be %q - %s", seenStruct, matches[1], cline.Text)
- }
- case "strfmt", "name", "discriminated", "file", "enum", "default", "alias", "type":
- // TODO: perhaps collect these and pass along to avoid lookups later on
- case "allOf":
- case "ignore":
- default:
- return nil, fmt.Errorf("classifier: unknown swagger annotation %q", matches[1])
- }
- }
-
- }
- }
- }
- }
- }
-
- return &cp, nil
-}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/doc.go b/test/tools/vendor/github.com/go-swagger/go-swagger/scan/doc.go
deleted file mode 100644
index 2bc415a8ffe..00000000000
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/doc.go
+++ /dev/null
@@ -1,89 +0,0 @@
-//go:build !go1.11
-// +build !go1.11
-
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package scan provides a scanner for go files that produces a swagger spec document.
-
-This package is intended for pre-go1.11 versions, and does not support go modules.
-
-You give it a main file and it will parse all the files that are required by that main
-package to produce a swagger specification.
-
-To use you can add a go:generate comment to your main file for example:
-
- //go:generate swagger generate spec
-
-The following annotations exist:
-
-swagger:meta
-
-The swagger:meta annotation flags a file as source for metadata about the API.
-This is typically a doc.go file with your package documentation.
-
-You can specify a Consumes and Produces key which has a new content type on each line
-Schemes is a tag that is required and allows for a comma separated string composed of:
-http, https, ws or wss
-
-Host and BasePath can be specified but those values will be defaults,
-they should get substituted when serving the swagger spec.
-
-Default parameters and responses are not supported at this stage, for those you can edit the template json.
-
-swagger:strfmt [name]
-
-A swagger:strfmt annotation names a type as a string formatter. The name is mandatory and that is
-what will be used as format name for this particular string format.
-String formats should only be used for very well known formats.
-
-swagger:model [?model name]
-
-A swagger:model annotation optionally gets a model name as extra data on the line.
-when this appears anywhere in a comment for a struct, then that struct becomes a schema
-in the definitions object of swagger.
-
-The struct gets analyzed and all the collected models are added to the tree.
-The refs are tracked separately so that they can be renamed later on.
-
-When this annotation is found to be on an interface instead of a struct, the properties are provided
-through exported nullary methods.
-
-A property of an interface model can have a Discriminator: true annotation to mark that field as
-the field that will contain the discriminator value.
-
-swagger:route [method] [path pattern] [operation id] [?tag1 tag2 tag3]
-
-A swagger:route annotation links a path to a method.
-This operation gets a unique id, which is used in various places as method name.
-One such usage is in method names for client generation for example.
-
-Because there are many routers available, this tool does not try to parse the paths
-you provided to your routing library of choice. So you have to specify your path pattern
-yourself in valid swagger syntax.
-
-swagger:params [operationid1 operationid2]
-
-Links a struct to one or more operations. The params in the resulting swagger spec can be composed of several structs.
-There are no guarantees given on how property name overlaps are resolved when several structs apply to the same operation.
-This tag works very similarly to the swagger:model tag except that it produces valid parameter objects instead of schema
-objects.
-
-swagger:response [?response name]
-
-Reads a struct decorated with swagger:response and uses that information to fill up the headers and the schema for a response.
-A swagger:route can specify a response name for a status code and then the matching response will be used for that operation in the swagger definition.
-*/
-package scan
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/enum.go b/test/tools/vendor/github.com/go-swagger/go-swagger/scan/enum.go
deleted file mode 100644
index d1ecc9c8717..00000000000
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/enum.go
+++ /dev/null
@@ -1,84 +0,0 @@
-//go:build !go1.11
-// +build !go1.11
-
-package scan
-
-import (
- "go/ast"
- "strconv"
- "strings"
- "unicode"
-)
-
-func upperSnakeCase(s string) string {
- in := []rune(s)
- isLower := func(idx int) bool {
- return idx >= 0 && idx < len(in) && unicode.IsLower(in[idx])
- }
-
- out := make([]rune, 0, len(in)+len(in)/2)
-
- for i, r := range in {
- if unicode.IsUpper(r) {
- r = unicode.ToLower(r)
- if i > 0 && in[i-1] != '_' && (isLower(i-1) || isLower(i+1)) {
- out = append(out, '_')
- }
- }
- out = append(out, r)
- }
-
- return strings.ToUpper(string(out))
-}
-
-func getEnumBasicLitValue(basicLit *ast.BasicLit) interface{} {
- switch basicLit.Kind.String() {
- case "INT":
- if result, err := strconv.ParseInt(basicLit.Value, 10, 64); err == nil {
- return result
- }
- case "FLOAT":
- if result, err := strconv.ParseFloat(basicLit.Value, 64); err == nil {
- return result
- }
- default:
- return strings.Trim(basicLit.Value, "\"")
- }
- return nil
-}
-
-func getEnumValues(file *ast.File, typeName string) (list []interface{}) {
- for _, decl := range file.Decls {
- genDecl, ok := decl.(*ast.GenDecl)
-
- if !ok {
- continue
- }
-
- if genDecl.Tok.String() == "const" {
- for _, spec := range genDecl.Specs {
- if valueSpec, ok := spec.(*ast.ValueSpec); ok {
- switch valueSpec.Type.(type) {
- case *ast.Ident:
- if valueSpec.Type.(*ast.Ident).Name == typeName {
- if basicLit, ok := valueSpec.Values[0].(*ast.BasicLit); ok {
- list = append(list, getEnumBasicLitValue(basicLit))
- }
- }
- default:
- var name = valueSpec.Names[0].Name
- if strings.HasPrefix(name, upperSnakeCase(typeName)) {
- var values = strings.SplitN(name, "__", 2)
- if len(values) == 2 {
- list = append(list, values[1])
- }
- }
- }
-
- }
-
- }
- }
- }
- return
-}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/meta.go b/test/tools/vendor/github.com/go-swagger/go-swagger/scan/meta.go
deleted file mode 100644
index f5b5ed5dd36..00000000000
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/meta.go
+++ /dev/null
@@ -1,246 +0,0 @@
-//go:build !go1.11
-// +build !go1.11
-
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package scan
-
-import (
- "encoding/json"
- "fmt"
- "net/mail"
- "regexp"
- "strings"
-
- "github.com/go-openapi/spec"
-)
-
-func metaTOSSetter(meta *spec.Info) func([]string) {
- return func(lines []string) {
- meta.TermsOfService = joinDropLast(lines)
- }
-}
-
-func metaConsumesSetter(meta *spec.Swagger) func([]string) {
- return func(consumes []string) { meta.Consumes = consumes }
-}
-
-func metaProducesSetter(meta *spec.Swagger) func([]string) {
- return func(produces []string) { meta.Produces = produces }
-}
-
-func metaSchemeSetter(meta *spec.Swagger) func([]string) {
- return func(schemes []string) { meta.Schemes = schemes }
-}
-
-func metaSecuritySetter(meta *spec.Swagger) func([]map[string][]string) {
- return func(secDefs []map[string][]string) { meta.Security = secDefs }
-}
-
-func metaSecurityDefinitionsSetter(meta *spec.Swagger) func(json.RawMessage) error {
- return func(jsonValue json.RawMessage) error {
- var jsonData spec.SecurityDefinitions
- err := json.Unmarshal(jsonValue, &jsonData)
- if err != nil {
- return err
- }
- meta.SecurityDefinitions = jsonData
- return nil
- }
-}
-
-func metaVendorExtensibleSetter(meta *spec.Swagger) func(json.RawMessage) error {
- return func(jsonValue json.RawMessage) error {
- var jsonData spec.Extensions
- err := json.Unmarshal(jsonValue, &jsonData)
- if err != nil {
- return err
- }
- for k := range jsonData {
- if !rxAllowedExtensions.MatchString(k) {
- return fmt.Errorf("invalid schema extension name, should start from `x-`: %s", k)
- }
- }
- meta.Extensions = jsonData
- return nil
- }
-}
-
-func infoVendorExtensibleSetter(meta *spec.Swagger) func(json.RawMessage) error {
- return func(jsonValue json.RawMessage) error {
- var jsonData spec.Extensions
- err := json.Unmarshal(jsonValue, &jsonData)
- if err != nil {
- return err
- }
- for k := range jsonData {
- if !rxAllowedExtensions.MatchString(k) {
- return fmt.Errorf("invalid schema extension name, should start from `x-`: %s", k)
- }
- }
- meta.Info.Extensions = jsonData
- return nil
- }
-}
-
-func newMetaParser(swspec *spec.Swagger) *sectionedParser {
- sp := new(sectionedParser)
- if swspec.Info == nil {
- swspec.Info = new(spec.Info)
- }
- info := swspec.Info
- sp.setTitle = func(lines []string) {
- tosave := joinDropLast(lines)
- if len(tosave) > 0 {
- tosave = rxStripTitleComments.ReplaceAllString(tosave, "")
- }
- info.Title = tosave
- }
- sp.setDescription = func(lines []string) { info.Description = joinDropLast(lines) }
- sp.taggers = []tagParser{
- newMultiLineTagParser("TOS", newMultilineDropEmptyParser(rxTOS, metaTOSSetter(info)), false),
- newMultiLineTagParser("Consumes", newMultilineDropEmptyParser(rxConsumes, metaConsumesSetter(swspec)), false),
- newMultiLineTagParser("Produces", newMultilineDropEmptyParser(rxProduces, metaProducesSetter(swspec)), false),
- newSingleLineTagParser("Schemes", newSetSchemes(metaSchemeSetter(swspec))),
- newMultiLineTagParser("Security", newSetSecurity(rxSecuritySchemes, metaSecuritySetter(swspec)), false),
- newMultiLineTagParser("SecurityDefinitions", newYamlParser(rxSecurity, metaSecurityDefinitionsSetter(swspec)), true),
- newSingleLineTagParser("Version", &setMetaSingle{swspec, rxVersion, setInfoVersion}),
- newSingleLineTagParser("Host", &setMetaSingle{swspec, rxHost, setSwaggerHost}),
- newSingleLineTagParser("BasePath", &setMetaSingle{swspec, rxBasePath, setSwaggerBasePath}),
- newSingleLineTagParser("Contact", &setMetaSingle{swspec, rxContact, setInfoContact}),
- newSingleLineTagParser("License", &setMetaSingle{swspec, rxLicense, setInfoLicense}),
- newMultiLineTagParser("YAMLInfoExtensionsBlock", newYamlParser(rxInfoExtensions, infoVendorExtensibleSetter(swspec)), true),
- newMultiLineTagParser("YAMLExtensionsBlock", newYamlParser(rxExtensions, metaVendorExtensibleSetter(swspec)), true),
- }
- return sp
-}
-
-type setMetaSingle struct {
- spec *spec.Swagger
- rx *regexp.Regexp
- set func(spec *spec.Swagger, lines []string) error
-}
-
-func (s *setMetaSingle) Matches(line string) bool {
- return s.rx.MatchString(line)
-}
-
-func (s *setMetaSingle) Parse(lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
- matches := s.rx.FindStringSubmatch(lines[0])
- if len(matches) > 1 && len(matches[1]) > 0 {
- return s.set(s.spec, []string{matches[1]})
- }
- return nil
-}
-
-func setSwaggerHost(swspec *spec.Swagger, lines []string) error {
- lns := lines
- if len(lns) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- lns = []string{"localhost"}
- }
- swspec.Host = lns[0]
- return nil
-}
-
-func setSwaggerBasePath(swspec *spec.Swagger, lines []string) error {
- var ln string
- if len(lines) > 0 {
- ln = lines[0]
- }
- swspec.BasePath = ln
- return nil
-}
-
-func setInfoVersion(swspec *spec.Swagger, lines []string) error {
- if len(lines) == 0 {
- return nil
- }
- info := safeInfo(swspec)
- info.Version = strings.TrimSpace(lines[0])
- return nil
-}
-
-func setInfoContact(swspec *spec.Swagger, lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
- contact, err := parseContactInfo(lines[0])
- if err != nil {
- return err
- }
- info := safeInfo(swspec)
- info.Contact = contact
- return nil
-}
-
-func parseContactInfo(line string) (*spec.ContactInfo, error) {
- nameEmail, url := splitURL(line)
- var name, email string
- if len(nameEmail) > 0 {
- addr, err := mail.ParseAddress(nameEmail)
- if err != nil {
- return nil, err
- }
- name, email = addr.Name, addr.Address
- }
- return &spec.ContactInfo{
- URL: url,
- Name: name,
- Email: email,
- }, nil
-}
-
-func setInfoLicense(swspec *spec.Swagger, lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
- info := safeInfo(swspec)
- line := lines[0]
- name, url := splitURL(line)
- info.License = &spec.License{
- Name: name,
- URL: url,
- }
- return nil
-}
-
-func safeInfo(swspec *spec.Swagger) *spec.Info {
- if swspec.Info == nil {
- swspec.Info = new(spec.Info)
- }
- return swspec.Info
-}
-
-// httpFTPScheme matches http://, https://, ws://, wss://
-var httpFTPScheme = regexp.MustCompile("(?:(?:ht|f)tp|ws)s?://")
-
-func splitURL(line string) (notURL, url string) {
- str := strings.TrimSpace(line)
- parts := httpFTPScheme.FindStringIndex(str)
- if len(parts) == 0 {
- if len(str) > 0 {
- notURL = str
- }
- return
- }
- if len(parts) > 0 {
- notURL = strings.TrimSpace(str[:parts[0]])
- url = strings.TrimSpace(str[parts[0]:])
- }
- return
-}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/operations.go b/test/tools/vendor/github.com/go-swagger/go-swagger/scan/operations.go
deleted file mode 100644
index 31e2ea5a926..00000000000
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/operations.go
+++ /dev/null
@@ -1,85 +0,0 @@
-//go:build !go1.11
-// +build !go1.11
-
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package scan
-
-import (
- "fmt"
- "go/ast"
-
- "github.com/go-openapi/spec"
-
- "golang.org/x/tools/go/loader"
-)
-
-func newOperationsParser(prog *loader.Program) *operationsParser {
- return &operationsParser{
- program: prog,
- }
-}
-
-type operationsParser struct {
- program *loader.Program
- definitions map[string]spec.Schema
- operations map[string]*spec.Operation
- responses map[string]spec.Response
-}
-
-func (op *operationsParser) Parse(gofile *ast.File, target interface{}, includeTags map[string]bool, excludeTags map[string]bool) error {
- tgt := target.(*spec.Paths)
- for _, comsec := range gofile.Comments {
- content := parsePathAnnotation(rxOperation, comsec.List)
-
- if content.Method == "" {
- continue // it's not, next!
- }
-
- if !shouldAcceptTag(content.Tags, includeTags, excludeTags) {
- if Debug {
- fmt.Printf("operation %s %s is ignored due to tag rules\n", content.Method, content.Path)
- }
- continue
- }
-
- pthObj := tgt.Paths[content.Path]
-
- op := setPathOperation(
- content.Method, content.ID,
- &pthObj, op.operations[content.ID])
-
- op.Tags = content.Tags
-
- sp := new(yamlSpecScanner)
- sp.setTitle = func(lines []string) { op.Summary = joinDropLast(lines) }
- sp.setDescription = func(lines []string) { op.Description = joinDropLast(lines) }
-
- if err := sp.Parse(content.Remaining); err != nil {
- return fmt.Errorf("operation (%s): %v", op.ID, err)
- }
- if err := sp.UnmarshalSpec(op.UnmarshalJSON); err != nil {
- return fmt.Errorf("operation (%s): %v", op.ID, err)
- }
-
- if tgt.Paths == nil {
- tgt.Paths = make(map[string]spec.PathItem)
- }
-
- tgt.Paths[content.Path] = pthObj
- }
-
- return nil
-}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/parameters.go b/test/tools/vendor/github.com/go-swagger/go-swagger/scan/parameters.go
deleted file mode 100644
index 58d96ebe364..00000000000
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/parameters.go
+++ /dev/null
@@ -1,515 +0,0 @@
-//go:build !go1.11
-// +build !go1.11
-
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package scan
-
-import (
- "fmt"
- "go/ast"
- "strings"
-
- "github.com/go-openapi/spec"
- "golang.org/x/tools/go/loader"
-)
-
-type operationValidationBuilder interface {
- validationBuilder
- SetCollectionFormat(string)
-}
-
-type paramTypable struct {
- param *spec.Parameter
-}
-
-func (pt paramTypable) Level() int { return 0 }
-
-func (pt paramTypable) Typed(tpe, format string) {
- pt.param.Typed(tpe, format)
-}
-
-func (pt paramTypable) WithEnum(values ...interface{}) {
- pt.param.WithEnum(values...)
-}
-
-func (pt paramTypable) SetRef(ref spec.Ref) {
- pt.param.Ref = ref
-}
-
-func (pt paramTypable) Items() swaggerTypable {
- bdt, schema := bodyTypable(pt.param.In, pt.param.Schema)
- if bdt != nil {
- pt.param.Schema = schema
- return bdt
- }
-
- if pt.param.Items == nil {
- pt.param.Items = new(spec.Items)
- }
- pt.param.Type = "array"
- return itemsTypable{pt.param.Items, 1}
-}
-
-func (pt paramTypable) Schema() *spec.Schema {
- if pt.param.In != "body" {
- return nil
- }
- if pt.param.Schema == nil {
- pt.param.Schema = new(spec.Schema)
- }
- return pt.param.Schema
-}
-
-type itemsTypable struct {
- items *spec.Items
- level int
-}
-
-func (pt itemsTypable) Level() int { return pt.level }
-
-func (pt itemsTypable) Typed(tpe, format string) {
- pt.items.Typed(tpe, format)
-}
-
-func (pt itemsTypable) SetRef(ref spec.Ref) {
- pt.items.Ref = ref
-}
-
-func (pt itemsTypable) WithEnum(values ...interface{}) {
- pt.items.WithEnum(values...)
-}
-
-func (pt itemsTypable) Schema() *spec.Schema {
- return nil
-}
-
-func (pt itemsTypable) Items() swaggerTypable {
- if pt.items.Items == nil {
- pt.items.Items = new(spec.Items)
- }
- pt.items.Type = "array"
- return itemsTypable{pt.items.Items, pt.level + 1}
-}
-
-type paramValidations struct {
- current *spec.Parameter
-}
-
-func (sv paramValidations) SetMaximum(val float64, exclusive bool) {
- sv.current.Maximum = &val
- sv.current.ExclusiveMaximum = exclusive
-}
-func (sv paramValidations) SetMinimum(val float64, exclusive bool) {
- sv.current.Minimum = &val
- sv.current.ExclusiveMinimum = exclusive
-}
-func (sv paramValidations) SetMultipleOf(val float64) { sv.current.MultipleOf = &val }
-func (sv paramValidations) SetMinItems(val int64) { sv.current.MinItems = &val }
-func (sv paramValidations) SetMaxItems(val int64) { sv.current.MaxItems = &val }
-func (sv paramValidations) SetMinLength(val int64) { sv.current.MinLength = &val }
-func (sv paramValidations) SetMaxLength(val int64) { sv.current.MaxLength = &val }
-func (sv paramValidations) SetPattern(val string) { sv.current.Pattern = val }
-func (sv paramValidations) SetUnique(val bool) { sv.current.UniqueItems = val }
-func (sv paramValidations) SetCollectionFormat(val string) { sv.current.CollectionFormat = val }
-func (sv paramValidations) SetEnum(val string) {
- sv.current.Enum = parseEnum(val, &spec.SimpleSchema{Type: sv.current.Type, Format: sv.current.Format})
-}
-func (sv paramValidations) SetDefault(val interface{}) { sv.current.Default = val }
-func (sv paramValidations) SetExample(val interface{}) { sv.current.Example = val }
-
-type itemsValidations struct {
- current *spec.Items
-}
-
-func (sv itemsValidations) SetMaximum(val float64, exclusive bool) {
- sv.current.Maximum = &val
- sv.current.ExclusiveMaximum = exclusive
-}
-func (sv itemsValidations) SetMinimum(val float64, exclusive bool) {
- sv.current.Minimum = &val
- sv.current.ExclusiveMinimum = exclusive
-}
-func (sv itemsValidations) SetMultipleOf(val float64) { sv.current.MultipleOf = &val }
-func (sv itemsValidations) SetMinItems(val int64) { sv.current.MinItems = &val }
-func (sv itemsValidations) SetMaxItems(val int64) { sv.current.MaxItems = &val }
-func (sv itemsValidations) SetMinLength(val int64) { sv.current.MinLength = &val }
-func (sv itemsValidations) SetMaxLength(val int64) { sv.current.MaxLength = &val }
-func (sv itemsValidations) SetPattern(val string) { sv.current.Pattern = val }
-func (sv itemsValidations) SetUnique(val bool) { sv.current.UniqueItems = val }
-func (sv itemsValidations) SetCollectionFormat(val string) { sv.current.CollectionFormat = val }
-func (sv itemsValidations) SetEnum(val string) {
- sv.current.Enum = parseEnum(val, &spec.SimpleSchema{Type: sv.current.Type, Format: sv.current.Format})
-}
-func (sv itemsValidations) SetDefault(val interface{}) { sv.current.Default = val }
-func (sv itemsValidations) SetExample(val interface{}) { sv.current.Example = val }
-
-type paramDecl struct {
- File *ast.File
- Decl *ast.GenDecl
- TypeSpec *ast.TypeSpec
- OperationIDs []string
-}
-
-func (sd *paramDecl) inferOperationIDs() (opids []string) {
- if len(sd.OperationIDs) > 0 {
- opids = sd.OperationIDs
- return
- }
-
- if sd.Decl.Doc != nil {
- for _, cmt := range sd.Decl.Doc.List {
- for _, ln := range strings.Split(cmt.Text, "\n") {
- matches := rxParametersOverride.FindStringSubmatch(ln)
- if len(matches) > 1 && len(matches[1]) > 0 {
- for _, pt := range strings.Split(matches[1], " ") {
- tr := strings.TrimSpace(pt)
- if len(tr) > 0 {
- opids = append(opids, tr)
- }
- }
- }
- }
- }
- }
- sd.OperationIDs = append(sd.OperationIDs, opids...)
- return
-}
-
-func newParameterParser(prog *loader.Program) *paramStructParser {
- scp := new(paramStructParser)
- scp.program = prog
- scp.scp = newSchemaParser(prog)
- return scp
-}
-
-type paramStructParser struct {
- program *loader.Program
- postDecls []schemaDecl
- scp *schemaParser
-}
-
-// Parse will traverse a file and look for parameters.
-func (pp *paramStructParser) Parse(gofile *ast.File, target interface{}) error {
- tgt := target.(map[string]*spec.Operation)
- for _, decl := range gofile.Decls {
- switch x1 := decl.(type) {
- // Check for parameters at the package level.
- case *ast.GenDecl:
- for _, spc := range x1.Specs {
- switch x2 := spc.(type) {
- case *ast.TypeSpec:
- sd := paramDecl{gofile, x1, x2, nil}
- sd.inferOperationIDs()
- if err := pp.parseDecl(tgt, sd); err != nil {
- return err
- }
- }
- }
- // Check for parameters inside functions.
- case *ast.FuncDecl:
- for _, b := range x1.Body.List {
- switch x2 := b.(type) {
- case *ast.DeclStmt:
- switch x3 := x2.Decl.(type) {
- case *ast.GenDecl:
- for _, spc := range x3.Specs {
- switch x4 := spc.(type) {
- case *ast.TypeSpec:
- sd := paramDecl{gofile, x3, x4, nil}
- sd.inferOperationIDs()
- if err := pp.parseDecl(tgt, sd); err != nil {
- return err
- }
- }
- }
- }
- }
- }
- }
- }
- return nil
-}
-
-func (pp *paramStructParser) parseDecl(operations map[string]*spec.Operation, decl paramDecl) error {
- // check if there is a swagger:parameters tag that is followed by one or more words,
- // these words are the ids of the operations this parameter struct applies to
- // once type name is found convert it to a schema, by looking up the schema in the
- // parameters dictionary that got passed into this parse method
- for _, opid := range decl.inferOperationIDs() {
- operation, ok := operations[opid]
- if !ok {
- operation = new(spec.Operation)
- operations[opid] = operation
- operation.ID = opid
- }
-
- // analyze struct body for fields etc
- // each exported struct field:
- // * gets a type mapped to a go primitive
- // * perhaps gets a format
- // * has to document the validations that apply for the type and the field
- // * when the struct field points to a model it becomes a ref: #/definitions/ModelName
- // * comments that aren't tags is used as the description
- if tpe, ok := decl.TypeSpec.Type.(*ast.StructType); ok {
- if err := pp.parseStructType(decl.File, operation, tpe, make(map[string]spec.Parameter)); err != nil {
- return err
- }
- }
-
- //operations[opid] = operation
- }
- return nil
-}
-
-func (pp *paramStructParser) parseEmbeddedStruct(gofile *ast.File, operation *spec.Operation, expr ast.Expr, seenPreviously map[string]spec.Parameter) error {
- switch tpe := expr.(type) {
- case *ast.Ident:
- // do lookup of type
- // take primitives into account, they should result in an error for swagger
- pkg, err := pp.scp.packageForFile(gofile, tpe)
- if err != nil {
- return fmt.Errorf("embedded struct: %v", err)
- }
- file, _, ts, err := findSourceFile(pkg, tpe.Name)
- if err != nil {
- return fmt.Errorf("embedded struct: %v", err)
- }
- if st, ok := ts.Type.(*ast.StructType); ok {
- return pp.parseStructType(file, operation, st, seenPreviously)
- }
- case *ast.SelectorExpr:
- // look up package, file and then type
- pkg, err := pp.scp.packageForSelector(gofile, tpe.X)
- if err != nil {
- return fmt.Errorf("embedded struct: %v", err)
- }
- file, _, ts, err := findSourceFile(pkg, tpe.Sel.Name)
- if err != nil {
- return fmt.Errorf("embedded struct: %v", err)
- }
- if st, ok := ts.Type.(*ast.StructType); ok {
- return pp.parseStructType(file, operation, st, seenPreviously)
- }
- case *ast.StarExpr:
- return pp.parseEmbeddedStruct(gofile, operation, tpe.X, seenPreviously)
- }
- fmt.Printf("3%#v\n", expr)
- return fmt.Errorf("unable to resolve embedded struct for: %v", expr)
-}
-
-func (pp *paramStructParser) parseStructType(gofile *ast.File, operation *spec.Operation, tpe *ast.StructType, seenPreviously map[string]spec.Parameter) error {
- if tpe.Fields != nil {
- pt := seenPreviously
-
- for _, fld := range tpe.Fields.List {
- if len(fld.Names) == 0 {
- // when the embedded struct is annotated with swagger:allOf it will be used as allOf property
- // otherwise the fields will just be included as normal properties
- if err := pp.parseEmbeddedStruct(gofile, operation, fld.Type, pt); err != nil {
- return err
- }
- }
- }
-
- // a slice used to keep track of the sequence of the map keys, as maps does not keep to any specific sequence (since Go-1.4)
- sequence := []string{}
-
- for _, fld := range tpe.Fields.List {
- if len(fld.Names) > 0 && fld.Names[0] != nil && fld.Names[0].IsExported() {
- gnm := fld.Names[0].Name
- nm, ignore, _, err := parseJSONTag(fld)
- if err != nil {
- return err
- }
- if ignore {
- continue
- }
-
- in := "query"
- // scan for param location first, this changes some behavior down the line
- if fld.Doc != nil {
- for _, cmt := range fld.Doc.List {
- for _, line := range strings.Split(cmt.Text, "\n") {
- matches := rxIn.FindStringSubmatch(line)
- if len(matches) > 0 && len(strings.TrimSpace(matches[1])) > 0 {
- in = strings.TrimSpace(matches[1])
- }
- }
- }
- }
-
- ps := pt[nm]
- ps.In = in
- var pty swaggerTypable = paramTypable{&ps}
- if in == "body" {
- pty = schemaTypable{pty.Schema(), 0}
- }
- if in == "formData" && fld.Doc != nil && fileParam(fld.Doc) {
- pty.Typed("file", "")
- } else {
- if err := pp.scp.parseNamedType(gofile, fld.Type, pty); err != nil {
- return err
- }
- }
-
- if strfmtName, ok := strfmtName(fld.Doc); ok {
- ps.Typed("string", strfmtName)
- ps.Ref = spec.Ref{}
- }
-
- sp := new(sectionedParser)
- sp.setDescription = func(lines []string) { ps.Description = joinDropLast(lines) }
- if ps.Ref.String() == "" {
- sp.taggers = []tagParser{
- newSingleLineTagParser("in", &matchOnlyParam{&ps, rxIn}),
- newSingleLineTagParser("maximum", &setMaximum{paramValidations{&ps}, rxf(rxMaximumFmt, "")}),
- newSingleLineTagParser("minimum", &setMinimum{paramValidations{&ps}, rxf(rxMinimumFmt, "")}),
- newSingleLineTagParser("multipleOf", &setMultipleOf{paramValidations{&ps}, rxf(rxMultipleOfFmt, "")}),
- newSingleLineTagParser("minLength", &setMinLength{paramValidations{&ps}, rxf(rxMinLengthFmt, "")}),
- newSingleLineTagParser("maxLength", &setMaxLength{paramValidations{&ps}, rxf(rxMaxLengthFmt, "")}),
- newSingleLineTagParser("pattern", &setPattern{paramValidations{&ps}, rxf(rxPatternFmt, "")}),
- newSingleLineTagParser("collectionFormat", &setCollectionFormat{paramValidations{&ps}, rxf(rxCollectionFormatFmt, "")}),
- newSingleLineTagParser("minItems", &setMinItems{paramValidations{&ps}, rxf(rxMinItemsFmt, "")}),
- newSingleLineTagParser("maxItems", &setMaxItems{paramValidations{&ps}, rxf(rxMaxItemsFmt, "")}),
- newSingleLineTagParser("unique", &setUnique{paramValidations{&ps}, rxf(rxUniqueFmt, "")}),
- newSingleLineTagParser("enum", &setEnum{paramValidations{&ps}, rxf(rxEnumFmt, "")}),
- newSingleLineTagParser("default", &setDefault{&ps.SimpleSchema, paramValidations{&ps}, rxf(rxDefaultFmt, "")}),
- newSingleLineTagParser("example", &setExample{&ps.SimpleSchema, paramValidations{&ps}, rxf(rxExampleFmt, "")}),
- newSingleLineTagParser("required", &setRequiredParam{&ps}),
- }
-
- itemsTaggers := func(items *spec.Items, level int) []tagParser {
- // the expression is 1-index based not 0-index
- itemsPrefix := fmt.Sprintf(rxItemsPrefixFmt, level+1)
-
- return []tagParser{
- newSingleLineTagParser(fmt.Sprintf("items%dMaximum", level), &setMaximum{itemsValidations{items}, rxf(rxMaximumFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dMinimum", level), &setMinimum{itemsValidations{items}, rxf(rxMinimumFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dMultipleOf", level), &setMultipleOf{itemsValidations{items}, rxf(rxMultipleOfFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dMinLength", level), &setMinLength{itemsValidations{items}, rxf(rxMinLengthFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dMaxLength", level), &setMaxLength{itemsValidations{items}, rxf(rxMaxLengthFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dPattern", level), &setPattern{itemsValidations{items}, rxf(rxPatternFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dCollectionFormat", level), &setCollectionFormat{itemsValidations{items}, rxf(rxCollectionFormatFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dMinItems", level), &setMinItems{itemsValidations{items}, rxf(rxMinItemsFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dMaxItems", level), &setMaxItems{itemsValidations{items}, rxf(rxMaxItemsFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dUnique", level), &setUnique{itemsValidations{items}, rxf(rxUniqueFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dEnum", level), &setEnum{itemsValidations{items}, rxf(rxEnumFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dDefault", level), &setDefault{&items.SimpleSchema, itemsValidations{items}, rxf(rxDefaultFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dExample", level), &setExample{&items.SimpleSchema, itemsValidations{items}, rxf(rxExampleFmt, itemsPrefix)}),
- }
- }
-
- var parseArrayTypes func(expr ast.Expr, items *spec.Items, level int) ([]tagParser, error)
- parseArrayTypes = func(expr ast.Expr, items *spec.Items, level int) ([]tagParser, error) {
- if items == nil {
- return []tagParser{}, nil
- }
- switch iftpe := expr.(type) {
- case *ast.ArrayType:
- eleTaggers := itemsTaggers(items, level)
- sp.taggers = append(eleTaggers, sp.taggers...)
- otherTaggers, err := parseArrayTypes(iftpe.Elt, items.Items, level+1)
- if err != nil {
- return nil, err
- }
- return otherTaggers, nil
- case *ast.SelectorExpr:
- otherTaggers, err := parseArrayTypes(iftpe.Sel, items.Items, level+1)
- if err != nil {
- return nil, err
- }
- return otherTaggers, nil
- case *ast.Ident:
- taggers := []tagParser{}
- if iftpe.Obj == nil {
- taggers = itemsTaggers(items, level)
- }
- otherTaggers, err := parseArrayTypes(expr, items.Items, level+1)
- if err != nil {
- return nil, err
- }
- return append(taggers, otherTaggers...), nil
- case *ast.StarExpr:
- otherTaggers, err := parseArrayTypes(iftpe.X, items, level)
- if err != nil {
- return nil, err
- }
- return otherTaggers, nil
- default:
- return nil, fmt.Errorf("unknown field type ele for %q", nm)
- }
- }
-
- // check if this is a primitive, if so parse the validations from the
- // doc comments of the slice declaration.
- if ftped, ok := fld.Type.(*ast.ArrayType); ok {
- taggers, err := parseArrayTypes(ftped.Elt, ps.Items, 0)
- if err != nil {
- return err
- }
- sp.taggers = append(taggers, sp.taggers...)
- }
-
- } else {
-
- sp.taggers = []tagParser{
- newSingleLineTagParser("in", &matchOnlyParam{&ps, rxIn}),
- newSingleLineTagParser("required", &matchOnlyParam{&ps, rxRequired}),
- }
- }
- if err := sp.Parse(fld.Doc); err != nil {
- return err
- }
- if ps.In == "path" {
- ps.Required = true
- }
-
- if ps.Name == "" {
- ps.Name = nm
- }
-
- if nm != gnm {
- addExtension(&ps.VendorExtensible, "x-go-name", gnm)
- }
- pt[nm] = ps
- sequence = append(sequence, nm)
- }
- }
-
- for _, k := range sequence {
- p := pt[k]
- for i, v := range operation.Parameters {
- if v.Name == k {
- operation.Parameters = append(operation.Parameters[:i], operation.Parameters[i+1:]...)
- break
- }
- }
- operation.Parameters = append(operation.Parameters, p)
- }
- }
-
- return nil
-}
-
-func isAliasParam(prop swaggerTypable) bool {
- var isParam bool
- if param, ok := prop.(paramTypable); ok {
- isParam = param.param.In == "query" ||
- param.param.In == "path" ||
- param.param.In == "formData"
- }
- return isParam
-}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/path.go b/test/tools/vendor/github.com/go-swagger/go-swagger/scan/path.go
deleted file mode 100644
index 7302d41c38f..00000000000
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/path.go
+++ /dev/null
@@ -1,151 +0,0 @@
-//go:build !go1.11
-// +build !go1.11
-
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package scan
-
-import (
- "go/ast"
- "regexp"
- "strings"
-
- "github.com/go-openapi/spec"
-)
-
-type parsedPathContent struct {
- Method, Path, ID string
- Tags []string
- Remaining *ast.CommentGroup
-}
-
-func parsePathAnnotation(annotation *regexp.Regexp, lines []*ast.Comment) (cnt parsedPathContent) {
- var justMatched bool
-
- for _, cmt := range lines {
- for _, line := range strings.Split(cmt.Text, "\n") {
- matches := annotation.FindStringSubmatch(line)
- if len(matches) > 3 {
- cnt.Method, cnt.Path, cnt.ID = matches[1], matches[2], matches[len(matches)-1]
- cnt.Tags = rxSpace.Split(matches[3], -1)
- if len(matches[3]) == 0 {
- cnt.Tags = nil
- }
- justMatched = true
- } else if cnt.Method != "" {
- if cnt.Remaining == nil {
- cnt.Remaining = new(ast.CommentGroup)
- }
- if !justMatched || strings.TrimSpace(rxStripComments.ReplaceAllString(line, "")) != "" {
- cc := new(ast.Comment)
- cc.Slash = cmt.Slash
- cc.Text = line
- cnt.Remaining.List = append(cnt.Remaining.List, cc)
- justMatched = false
- }
- }
- }
- }
-
- return
-}
-
-func setPathOperation(method, id string, pthObj *spec.PathItem, op *spec.Operation) *spec.Operation {
- if op == nil {
- op = new(spec.Operation)
- op.ID = id
- }
-
- switch strings.ToUpper(method) {
- case "GET":
- if pthObj.Get != nil {
- if id == pthObj.Get.ID {
- op = pthObj.Get
- } else {
- pthObj.Get = op
- }
- } else {
- pthObj.Get = op
- }
-
- case "POST":
- if pthObj.Post != nil {
- if id == pthObj.Post.ID {
- op = pthObj.Post
- } else {
- pthObj.Post = op
- }
- } else {
- pthObj.Post = op
- }
-
- case "PUT":
- if pthObj.Put != nil {
- if id == pthObj.Put.ID {
- op = pthObj.Put
- } else {
- pthObj.Put = op
- }
- } else {
- pthObj.Put = op
- }
-
- case "PATCH":
- if pthObj.Patch != nil {
- if id == pthObj.Patch.ID {
- op = pthObj.Patch
- } else {
- pthObj.Patch = op
- }
- } else {
- pthObj.Patch = op
- }
-
- case "HEAD":
- if pthObj.Head != nil {
- if id == pthObj.Head.ID {
- op = pthObj.Head
- } else {
- pthObj.Head = op
- }
- } else {
- pthObj.Head = op
- }
-
- case "DELETE":
- if pthObj.Delete != nil {
- if id == pthObj.Delete.ID {
- op = pthObj.Delete
- } else {
- pthObj.Delete = op
- }
- } else {
- pthObj.Delete = op
- }
-
- case "OPTIONS":
- if pthObj.Options != nil {
- if id == pthObj.Options.ID {
- op = pthObj.Options
- } else {
- pthObj.Options = op
- }
- } else {
- pthObj.Options = op
- }
- }
-
- return op
-}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/responses.go b/test/tools/vendor/github.com/go-swagger/go-swagger/scan/responses.go
deleted file mode 100644
index 327b8a488fa..00000000000
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/responses.go
+++ /dev/null
@@ -1,453 +0,0 @@
-//go:build !go1.11
-// +build !go1.11
-
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package scan
-
-import (
- "fmt"
- "go/ast"
- "strings"
-
- "golang.org/x/tools/go/loader"
-
- "github.com/go-openapi/spec"
-)
-
-type responseTypable struct {
- in string
- header *spec.Header
- response *spec.Response
-}
-
-func (ht responseTypable) Level() int { return 0 }
-
-func (ht responseTypable) Typed(tpe, format string) {
- ht.header.Typed(tpe, format)
-}
-
-func (ht responseTypable) WithEnum(values ...interface{}) {
- ht.header.WithEnum(values)
-}
-
-func bodyTypable(in string, schema *spec.Schema) (swaggerTypable, *spec.Schema) {
- if in == "body" {
- // get the schema for items on the schema property
- if schema == nil {
- schema = new(spec.Schema)
- }
- if schema.Items == nil {
- schema.Items = new(spec.SchemaOrArray)
- }
- if schema.Items.Schema == nil {
- schema.Items.Schema = new(spec.Schema)
- }
- schema.Typed("array", "")
- return schemaTypable{schema.Items.Schema, 0}, schema
- }
- return nil, nil
-}
-
-func (ht responseTypable) Items() swaggerTypable {
- bdt, schema := bodyTypable(ht.in, ht.response.Schema)
- if bdt != nil {
- ht.response.Schema = schema
- return bdt
- }
-
- if ht.header.Items == nil {
- ht.header.Items = new(spec.Items)
- }
- ht.header.Type = "array"
- return itemsTypable{ht.header.Items, 1}
-}
-
-func (ht responseTypable) SetRef(ref spec.Ref) {
- // having trouble seeing the usefulness of this one here
- ht.Schema().Ref = ref
-}
-
-func (ht responseTypable) Schema() *spec.Schema {
- if ht.response.Schema == nil {
- ht.response.Schema = new(spec.Schema)
- }
- return ht.response.Schema
-}
-
-func (ht responseTypable) SetSchema(schema *spec.Schema) {
- ht.response.Schema = schema
-}
-
-func (ht responseTypable) CollectionOf(items *spec.Items, format string) {
- ht.header.CollectionOf(items, format)
-}
-
-type headerValidations struct {
- current *spec.Header
-}
-
-func (sv headerValidations) SetMaximum(val float64, exclusive bool) {
- sv.current.Maximum = &val
- sv.current.ExclusiveMaximum = exclusive
-}
-func (sv headerValidations) SetMinimum(val float64, exclusive bool) {
- sv.current.Minimum = &val
- sv.current.ExclusiveMinimum = exclusive
-}
-func (sv headerValidations) SetMultipleOf(val float64) { sv.current.MultipleOf = &val }
-func (sv headerValidations) SetMinItems(val int64) { sv.current.MinItems = &val }
-func (sv headerValidations) SetMaxItems(val int64) { sv.current.MaxItems = &val }
-func (sv headerValidations) SetMinLength(val int64) { sv.current.MinLength = &val }
-func (sv headerValidations) SetMaxLength(val int64) { sv.current.MaxLength = &val }
-func (sv headerValidations) SetPattern(val string) { sv.current.Pattern = val }
-func (sv headerValidations) SetUnique(val bool) { sv.current.UniqueItems = val }
-func (sv headerValidations) SetCollectionFormat(val string) { sv.current.CollectionFormat = val }
-func (sv headerValidations) SetEnum(val string) {
- sv.current.Enum = parseEnum(val, &spec.SimpleSchema{Type: sv.current.Type, Format: sv.current.Format})
-}
-func (sv headerValidations) SetDefault(val interface{}) { sv.current.Default = val }
-func (sv headerValidations) SetExample(val interface{}) { sv.current.Example = val }
-
-func newResponseDecl(file *ast.File, decl *ast.GenDecl, ts *ast.TypeSpec) responseDecl {
- var rd responseDecl
- rd.File = file
- rd.Decl = decl
- rd.TypeSpec = ts
- rd.inferNames()
- return rd
-}
-
-type responseDecl struct {
- File *ast.File
- Decl *ast.GenDecl
- TypeSpec *ast.TypeSpec
- GoName string
- Name string
- annotated bool
-}
-
-func (sd *responseDecl) hasAnnotation() bool {
- sd.inferNames()
- return sd.annotated
-}
-
-func (sd *responseDecl) inferNames() (goName string, name string) {
- if sd.GoName != "" {
- goName, name = sd.GoName, sd.Name
- return
- }
- goName = sd.TypeSpec.Name.Name
- name = goName
- if sd.Decl.Doc != nil {
- DECLS:
- for _, cmt := range sd.Decl.Doc.List {
- for _, ln := range strings.Split(cmt.Text, "\n") {
- matches := rxResponseOverride.FindStringSubmatch(ln)
- if len(matches) > 0 {
- sd.annotated = true
- }
- if len(matches) > 1 && len(matches[1]) > 0 {
- name = matches[1]
- break DECLS
- }
- }
- }
- }
- sd.GoName = goName
- sd.Name = name
- return
-}
-
-func newResponseParser(prog *loader.Program) *responseParser {
- return &responseParser{prog, nil, newSchemaParser(prog)}
-}
-
-type responseParser struct {
- program *loader.Program
- postDecls []schemaDecl
- scp *schemaParser
-}
-
-func (rp *responseParser) Parse(gofile *ast.File, target interface{}) error {
- tgt := target.(map[string]spec.Response)
- for _, decl := range gofile.Decls {
- switch x1 := decl.(type) {
- // Check for parameters at the package level.
- case *ast.GenDecl:
- for _, spc := range x1.Specs {
- switch x2 := spc.(type) {
- case *ast.TypeSpec:
- sd := newResponseDecl(gofile, x1, x2)
- if sd.hasAnnotation() {
- if err := rp.parseDecl(tgt, sd); err != nil {
- return err
- }
- }
- }
- }
- // Check for parameters inside functions.
- case *ast.FuncDecl:
- for _, b := range x1.Body.List {
- switch x2 := b.(type) {
- case *ast.DeclStmt:
- switch x3 := x2.Decl.(type) {
- case *ast.GenDecl:
- for _, spc := range x3.Specs {
- switch x4 := spc.(type) {
- case *ast.TypeSpec:
- sd := newResponseDecl(gofile, x3, x4)
- if sd.hasAnnotation() {
- if err := rp.parseDecl(tgt, sd); err != nil {
- return err
- }
- }
- }
- }
- }
- }
- }
- }
- }
- return nil
-}
-
-func (rp *responseParser) parseDecl(responses map[string]spec.Response, decl responseDecl) error {
- // check if there is a swagger:parameters tag that is followed by one or more words,
- // these words are the ids of the operations this parameter struct applies to
- // once type name is found convert it to a schema, by looking up the schema in the
- // parameters dictionary that got passed into this parse method
- response := responses[decl.Name]
- resPtr := &response
-
- // analyze doc comment for the model
- sp := new(sectionedParser)
- sp.setDescription = func(lines []string) { resPtr.Description = joinDropLast(lines) }
- if err := sp.Parse(decl.Decl.Doc); err != nil {
- return err
- }
-
- // analyze struct body for fields etc
- // each exported struct field:
- // * gets a type mapped to a go primitive
- // * perhaps gets a format
- // * has to document the validations that apply for the type and the field
- // * when the struct field points to a model it becomes a ref: #/definitions/ModelName
- // * comments that aren't tags is used as the description
- if tpe, ok := decl.TypeSpec.Type.(*ast.StructType); ok {
- if err := rp.parseStructType(decl.File, resPtr, tpe, make(map[string]struct{})); err != nil {
- return err
- }
- }
-
- responses[decl.Name] = response
- return nil
-}
-
-func (rp *responseParser) parseEmbeddedStruct(gofile *ast.File, response *spec.Response, expr ast.Expr, seenPreviously map[string]struct{}) error {
- switch tpe := expr.(type) {
- case *ast.Ident:
- // do lookup of type
- // take primitives into account, they should result in an error for swagger
- pkg, err := rp.scp.packageForFile(gofile, tpe)
- if err != nil {
- return fmt.Errorf("embedded struct: %v", err)
- }
- file, _, ts, err := findSourceFile(pkg, tpe.Name)
- if err != nil {
- return fmt.Errorf("embedded struct: %v", err)
- }
- if st, ok := ts.Type.(*ast.StructType); ok {
- return rp.parseStructType(file, response, st, seenPreviously)
- }
- case *ast.SelectorExpr:
- // look up package, file and then type
- pkg, err := rp.scp.packageForSelector(gofile, tpe.X)
- if err != nil {
- return fmt.Errorf("embedded struct: %v", err)
- }
- file, _, ts, err := findSourceFile(pkg, tpe.Sel.Name)
- if err != nil {
- return fmt.Errorf("embedded struct: %v", err)
- }
- if st, ok := ts.Type.(*ast.StructType); ok {
- return rp.parseStructType(file, response, st, seenPreviously)
- }
- case *ast.StarExpr:
- return rp.parseEmbeddedStruct(gofile, response, tpe.X, seenPreviously)
- }
- fmt.Printf("1%#v\n", expr)
- return fmt.Errorf("unable to resolve embedded struct for: %v", expr)
-}
-
-func (rp *responseParser) parseStructType(gofile *ast.File, response *spec.Response, tpe *ast.StructType, seenPreviously map[string]struct{}) error {
- if tpe.Fields != nil {
-
- seenProperties := seenPreviously
-
- for _, fld := range tpe.Fields.List {
- if len(fld.Names) == 0 {
- // when the embedded struct is annotated with swagger:allOf it will be used as allOf property
- // otherwise the fields will just be included as normal properties
- if err := rp.parseEmbeddedStruct(gofile, response, fld.Type, seenProperties); err != nil {
- return err
- }
- }
- }
-
- for _, fld := range tpe.Fields.List {
- if len(fld.Names) > 0 && fld.Names[0] != nil && fld.Names[0].IsExported() {
- nm, ignore, _, err := parseJSONTag(fld)
- if err != nil {
- return err
- }
- if ignore {
- continue
- }
-
- var in string
- // scan for param location first, this changes some behavior down the line
- if fld.Doc != nil {
- for _, cmt := range fld.Doc.List {
- for _, line := range strings.Split(cmt.Text, "\n") {
- matches := rxIn.FindStringSubmatch(line)
- if len(matches) > 0 && len(strings.TrimSpace(matches[1])) > 0 {
- in = strings.TrimSpace(matches[1])
- }
- }
- }
- }
-
- ps := response.Headers[nm]
-
- // support swagger:file for response
- // An API operation can return a file, such as an image or PDF. In this case,
- // define the response schema with type: file and specify the appropriate MIME types in the produces section.
- if fld.Doc != nil && fileParam(fld.Doc) {
- response.Schema = &spec.Schema{}
- response.Schema.Typed("file", "")
- } else if err := rp.scp.parseNamedType(gofile, fld.Type, responseTypable{in, &ps, response}); err != nil {
- return err
- }
-
- if strfmtName, ok := strfmtName(fld.Doc); ok {
- ps.Typed("string", strfmtName)
- }
-
- sp := new(sectionedParser)
- sp.setDescription = func(lines []string) { ps.Description = joinDropLast(lines) }
- sp.taggers = []tagParser{
- newSingleLineTagParser("maximum", &setMaximum{headerValidations{&ps}, rxf(rxMaximumFmt, "")}),
- newSingleLineTagParser("minimum", &setMinimum{headerValidations{&ps}, rxf(rxMinimumFmt, "")}),
- newSingleLineTagParser("multipleOf", &setMultipleOf{headerValidations{&ps}, rxf(rxMultipleOfFmt, "")}),
- newSingleLineTagParser("minLength", &setMinLength{headerValidations{&ps}, rxf(rxMinLengthFmt, "")}),
- newSingleLineTagParser("maxLength", &setMaxLength{headerValidations{&ps}, rxf(rxMaxLengthFmt, "")}),
- newSingleLineTagParser("pattern", &setPattern{headerValidations{&ps}, rxf(rxPatternFmt, "")}),
- newSingleLineTagParser("collectionFormat", &setCollectionFormat{headerValidations{&ps}, rxf(rxCollectionFormatFmt, "")}),
- newSingleLineTagParser("minItems", &setMinItems{headerValidations{&ps}, rxf(rxMinItemsFmt, "")}),
- newSingleLineTagParser("maxItems", &setMaxItems{headerValidations{&ps}, rxf(rxMaxItemsFmt, "")}),
- newSingleLineTagParser("unique", &setUnique{headerValidations{&ps}, rxf(rxUniqueFmt, "")}),
- newSingleLineTagParser("enum", &setEnum{headerValidations{&ps}, rxf(rxEnumFmt, "")}),
- newSingleLineTagParser("default", &setDefault{&ps.SimpleSchema, headerValidations{&ps}, rxf(rxDefaultFmt, "")}),
- newSingleLineTagParser("example", &setExample{&ps.SimpleSchema, headerValidations{&ps}, rxf(rxExampleFmt, "")}),
- }
- itemsTaggers := func(items *spec.Items, level int) []tagParser {
- // the expression is 1-index based not 0-index
- itemsPrefix := fmt.Sprintf(rxItemsPrefixFmt, level+1)
-
- return []tagParser{
- newSingleLineTagParser(fmt.Sprintf("items%dMaximum", level), &setMaximum{itemsValidations{items}, rxf(rxMaximumFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dMinimum", level), &setMinimum{itemsValidations{items}, rxf(rxMinimumFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dMultipleOf", level), &setMultipleOf{itemsValidations{items}, rxf(rxMultipleOfFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dMinLength", level), &setMinLength{itemsValidations{items}, rxf(rxMinLengthFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dMaxLength", level), &setMaxLength{itemsValidations{items}, rxf(rxMaxLengthFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dPattern", level), &setPattern{itemsValidations{items}, rxf(rxPatternFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dCollectionFormat", level), &setCollectionFormat{itemsValidations{items}, rxf(rxCollectionFormatFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dMinItems", level), &setMinItems{itemsValidations{items}, rxf(rxMinItemsFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dMaxItems", level), &setMaxItems{itemsValidations{items}, rxf(rxMaxItemsFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dUnique", level), &setUnique{itemsValidations{items}, rxf(rxUniqueFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dEnum", level), &setEnum{itemsValidations{items}, rxf(rxEnumFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dDefault", level), &setDefault{&items.SimpleSchema, itemsValidations{items}, rxf(rxDefaultFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dExample", level), &setExample{&items.SimpleSchema, itemsValidations{items}, rxf(rxExampleFmt, itemsPrefix)}),
- }
- }
-
- var parseArrayTypes func(expr ast.Expr, items *spec.Items, level int) ([]tagParser, error)
- parseArrayTypes = func(expr ast.Expr, items *spec.Items, level int) ([]tagParser, error) {
- if items == nil {
- return []tagParser{}, nil
- }
- switch iftpe := expr.(type) {
- case *ast.ArrayType:
- eleTaggers := itemsTaggers(items, level)
- sp.taggers = append(eleTaggers, sp.taggers...)
- otherTaggers, err := parseArrayTypes(iftpe.Elt, items.Items, level+1)
- if err != nil {
- return nil, err
- }
- return otherTaggers, nil
- case *ast.Ident:
- taggers := []tagParser{}
- if iftpe.Obj == nil {
- taggers = itemsTaggers(items, level)
- }
- otherTaggers, err := parseArrayTypes(expr, items.Items, level+1)
- if err != nil {
- return nil, err
- }
- return append(taggers, otherTaggers...), nil
- case *ast.StarExpr:
- otherTaggers, err := parseArrayTypes(iftpe.X, items, level)
- if err != nil {
- return nil, err
- }
- return otherTaggers, nil
- default:
- return nil, fmt.Errorf("unknown field type ele for %q", nm)
- }
- }
- // check if this is a primitive, if so parse the validations from the
- // doc comments of the slice declaration.
- if ftped, ok := fld.Type.(*ast.ArrayType); ok {
- taggers, err := parseArrayTypes(ftped.Elt, ps.Items, 0)
- if err != nil {
- return err
- }
- sp.taggers = append(taggers, sp.taggers...)
- }
-
- if err := sp.Parse(fld.Doc); err != nil {
- return err
- }
-
- if in != "body" {
- seenProperties[nm] = struct{}{}
- if response.Headers == nil {
- response.Headers = make(map[string]spec.Header)
- }
- response.Headers[nm] = ps
- }
- }
- }
-
- for k := range response.Headers {
- if _, ok := seenProperties[k]; !ok {
- delete(response.Headers, k)
- }
- }
- }
-
- return nil
-}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/route_params.go b/test/tools/vendor/github.com/go-swagger/go-swagger/scan/route_params.go
deleted file mode 100644
index 6dd17f6b4e8..00000000000
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/route_params.go
+++ /dev/null
@@ -1,253 +0,0 @@
-//go:build !go1.11
-// +build !go1.11
-
-package scan
-
-import (
- "errors"
- "strconv"
- "strings"
-
- "github.com/go-openapi/spec"
-)
-
-const (
- // ParamDescriptionKey indicates the tag used to define a parameter description in swagger:route
- ParamDescriptionKey = "description"
- // ParamNameKey indicates the tag used to define a parameter name in swagger:route
- ParamNameKey = "name"
- // ParamInKey indicates the tag used to define a parameter location in swagger:route
- ParamInKey = "in"
- // ParamRequiredKey indicates the tag used to declare whether a parameter is required in swagger:route
- ParamRequiredKey = "required"
- // ParamTypeKey indicates the tag used to define the parameter type in swagger:route
- ParamTypeKey = "type"
- // ParamAllowEmptyKey indicates the tag used to indicate whether a parameter allows empty values in swagger:route
- ParamAllowEmptyKey = "allowempty"
-
- // SchemaMinKey indicates the tag used to indicate the minimum value allowed for this type in swagger:route
- SchemaMinKey = "min"
- // SchemaMaxKey indicates the tag used to indicate the maximum value allowed for this type in swagger:route
- SchemaMaxKey = "max"
- // SchemaEnumKey indicates the tag used to specify the allowed values for this type in swagger:route
- SchemaEnumKey = "enum"
- // SchemaFormatKey indicates the expected format for this field in swagger:route
- SchemaFormatKey = "format"
- // SchemaDefaultKey indicates the default value for this field in swagger:route
- SchemaDefaultKey = "default"
- // SchemaMinLenKey indicates the minimum length this field in swagger:route
- SchemaMinLenKey = "minlength"
- // SchemaMaxLenKey indicates the minimum length this field in swagger:route
- SchemaMaxLenKey = "maxlength"
-
- // TypeArray is the identifier for an array type in swagger:route
- TypeArray = "array"
- // TypeNumber is the identifier for a number type in swagger:route
- TypeNumber = "number"
- // TypeInteger is the identifier for an integer type in swagger:route
- TypeInteger = "integer"
- // TypeBoolean is the identifier for a boolean type in swagger:route
- TypeBoolean = "boolean"
- // TypeBool is the identifier for a boolean type in swagger:route
- TypeBool = "bool"
- // TypeObject is the identifier for an object type in swagger:route
- TypeObject = "object"
- // TypeString is the identifier for a string type in swagger:route
- TypeString = "string"
-)
-
-var (
- validIn = []string{"path", "query", "header", "body", "form"}
- basicTypes = []string{TypeInteger, TypeNumber, TypeString, TypeBoolean, TypeBool, TypeArray}
-)
-
-func newSetParams(params []*spec.Parameter, setter func([]*spec.Parameter)) *setOpParams {
- return &setOpParams{
- set: setter,
- parameters: params,
- }
-}
-
-type setOpParams struct {
- set func([]*spec.Parameter)
- parameters []*spec.Parameter
-}
-
-func (s *setOpParams) Matches(line string) bool {
- return rxParameters.MatchString(line)
-}
-
-func (s *setOpParams) Parse(lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
-
- var current *spec.Parameter
- var extraData map[string]string
-
- for _, line := range lines {
- l := strings.TrimSpace(line)
-
- if strings.HasPrefix(l, "+") {
- s.finalizeParam(current, extraData)
- current = new(spec.Parameter)
- extraData = make(map[string]string)
- l = strings.TrimPrefix(l, "+")
- }
-
- kv := strings.SplitN(l, ":", 2)
-
- if len(kv) <= 1 {
- continue
- }
-
- key := strings.ToLower(strings.TrimSpace(kv[0]))
- value := strings.TrimSpace(kv[1])
-
- if current == nil {
- return errors.New("invalid route/operation schema provided")
- }
-
- switch key {
- case ParamDescriptionKey:
- current.Description = value
- case ParamNameKey:
- current.Name = value
- case ParamInKey:
- v := strings.ToLower(value)
- if contains(validIn, v) {
- current.In = v
- }
- case ParamRequiredKey:
- if v, err := strconv.ParseBool(value); err == nil {
- current.Required = v
- }
- case ParamTypeKey:
- if current.Schema == nil {
- current.Schema = new(spec.Schema)
- }
- if contains(basicTypes, value) {
- current.Type = strings.ToLower(value)
- if current.Type == TypeBool {
- current.Type = TypeBoolean
- }
- } else {
- if ref, err := spec.NewRef("#/definitions/" + value); err == nil {
- current.Type = TypeObject
- current.Schema.Ref = ref
- }
- }
- current.Schema.Type = spec.StringOrArray{current.Type}
- case ParamAllowEmptyKey:
- if v, err := strconv.ParseBool(value); err == nil {
- current.AllowEmptyValue = v
- }
- default:
- extraData[key] = value
- }
- }
-
- s.finalizeParam(current, extraData)
- s.set(s.parameters)
- return nil
-}
-
-func (s *setOpParams) finalizeParam(param *spec.Parameter, data map[string]string) {
- if param == nil {
- return
- }
-
- processSchema(data, param)
- s.parameters = append(s.parameters, param)
-}
-
-func processSchema(data map[string]string, param *spec.Parameter) {
- if param.Schema == nil {
- return
- }
-
- var enumValues []string
-
- for key, value := range data {
- switch key {
- case SchemaMinKey:
- if t := getType(param.Schema); t == TypeNumber || t == TypeInteger {
- v, _ := strconv.ParseFloat(value, 64)
- param.Schema.Minimum = &v
- }
- case SchemaMaxKey:
- if t := getType(param.Schema); t == TypeNumber || t == TypeInteger {
- v, _ := strconv.ParseFloat(value, 64)
- param.Schema.Maximum = &v
- }
- case SchemaMinLenKey:
- if getType(param.Schema) == TypeArray {
- v, _ := strconv.ParseInt(value, 10, 64)
- param.Schema.MinLength = &v
- }
- case SchemaMaxLenKey:
- if getType(param.Schema) == TypeArray {
- v, _ := strconv.ParseInt(value, 10, 64)
- param.Schema.MaxLength = &v
- }
- case SchemaEnumKey:
- enumValues = strings.Split(value, ",")
- case SchemaFormatKey:
- param.Schema.Format = value
- case SchemaDefaultKey:
- param.Schema.Default = convert(param.Type, value)
- }
- }
-
- if param.Description != "" {
- param.Schema.Description = param.Description
- }
-
- convertEnum(param.Schema, enumValues)
-}
-
-func convertEnum(schema *spec.Schema, enumValues []string) {
- if len(enumValues) == 0 {
- return
- }
-
- var finalEnum []interface{}
- for _, v := range enumValues {
- finalEnum = append(finalEnum, convert(schema.Type[0], strings.TrimSpace(v)))
- }
- schema.Enum = finalEnum
-}
-
-func convert(typeStr, valueStr string) interface{} {
- switch typeStr {
- case TypeInteger:
- fallthrough
- case TypeNumber:
- if num, err := strconv.ParseFloat(valueStr, 64); err == nil {
- return num
- }
- case TypeBoolean:
- fallthrough
- case TypeBool:
- if b, err := strconv.ParseBool(valueStr); err == nil {
- return b
- }
- }
- return valueStr
-}
-
-func getType(schema *spec.Schema) string {
- if len(schema.Type) == 0 {
- return ""
- }
- return schema.Type[0]
-}
-
-func contains(arr []string, obj string) bool {
- for _, v := range arr {
- if v == obj {
- return true
- }
- }
- return false
-}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/routes.go b/test/tools/vendor/github.com/go-swagger/go-swagger/scan/routes.go
deleted file mode 100644
index 644d61900fa..00000000000
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/routes.go
+++ /dev/null
@@ -1,146 +0,0 @@
-//go:build !go1.11
-// +build !go1.11
-
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package scan
-
-import (
- "fmt"
- "go/ast"
-
- "github.com/go-openapi/spec"
-
- "golang.org/x/tools/go/loader"
-)
-
-func opConsumesSetter(op *spec.Operation) func([]string) {
- return func(consumes []string) { op.Consumes = consumes }
-}
-
-func opProducesSetter(op *spec.Operation) func([]string) {
- return func(produces []string) { op.Produces = produces }
-}
-
-func opSchemeSetter(op *spec.Operation) func([]string) {
- return func(schemes []string) { op.Schemes = schemes }
-}
-
-func opSecurityDefsSetter(op *spec.Operation) func([]map[string][]string) {
- return func(securityDefs []map[string][]string) { op.Security = securityDefs }
-}
-
-func opResponsesSetter(op *spec.Operation) func(*spec.Response, map[int]spec.Response) {
- return func(def *spec.Response, scr map[int]spec.Response) {
- if op.Responses == nil {
- op.Responses = new(spec.Responses)
- }
- op.Responses.Default = def
- op.Responses.StatusCodeResponses = scr
- }
-}
-
-func opParamSetter(op *spec.Operation) func([]*spec.Parameter) {
- return func(params []*spec.Parameter) {
- for _, v := range params {
- op.AddParam(v)
- }
- }
-}
-
-func newRoutesParser(prog *loader.Program) *routesParser {
- return &routesParser{
- program: prog,
- }
-}
-
-type routesParser struct {
- program *loader.Program
- definitions map[string]spec.Schema
- operations map[string]*spec.Operation
- responses map[string]spec.Response
- parameters []*spec.Parameter
-}
-
-var routeVendorExtensibleParser = vendorExtensibleParser{
- setExtensions: func(ext spec.Extensions, dest interface{}) {
- dest.(*spec.Operation).Extensions = ext
- },
-}
-
-func (rp *routesParser) Parse(gofile *ast.File, target interface{}, includeTags map[string]bool, excludeTags map[string]bool) error {
- tgt := target.(*spec.Paths)
- for _, comsec := range gofile.Comments {
- content := parsePathAnnotation(rxRoute, comsec.List)
-
- if content.Method == "" {
- continue // it's not, next!
- }
-
- if !shouldAcceptTag(content.Tags, includeTags, excludeTags) {
- if Debug {
- fmt.Printf("route %s %s is ignored due to tag rules\n", content.Method, content.Path)
- }
- continue
- }
-
- pthObj := tgt.Paths[content.Path]
- op := setPathOperation(
- content.Method, content.ID,
- &pthObj, rp.operations[content.ID])
-
- op.Tags = content.Tags
-
- sp := new(sectionedParser)
- sp.setTitle = func(lines []string) { op.Summary = joinDropLast(lines) }
- sp.setDescription = func(lines []string) { op.Description = joinDropLast(lines) }
- sr := newSetResponses(rp.definitions, rp.responses, opResponsesSetter(op))
- spa := newSetParams(rp.parameters, opParamSetter(op))
- sp.taggers = []tagParser{
- newMultiLineTagParser("Consumes", newMultilineDropEmptyParser(rxConsumes, opConsumesSetter(op)), false),
- newMultiLineTagParser("Produces", newMultilineDropEmptyParser(rxProduces, opProducesSetter(op)), false),
- newSingleLineTagParser("Schemes", newSetSchemes(opSchemeSetter(op))),
- newMultiLineTagParser("Security", newSetSecurity(rxSecuritySchemes, opSecurityDefsSetter(op)), false),
- newMultiLineTagParser("Parameters", spa, false),
- newMultiLineTagParser("Responses", sr, false),
- newMultiLineTagParser("YAMLExtensionsBlock", newYamlParser(rxExtensions, routeVendorExtensibleParser.ParseInto(op)), true),
- }
- if err := sp.Parse(content.Remaining); err != nil {
- return fmt.Errorf("operation (%s): %v", op.ID, err)
- }
-
- if tgt.Paths == nil {
- tgt.Paths = make(map[string]spec.PathItem)
- }
- tgt.Paths[content.Path] = pthObj
- }
-
- return nil
-}
-
-func shouldAcceptTag(tags []string, includeTags map[string]bool, excludeTags map[string]bool) bool {
- for _, tag := range tags {
- if len(includeTags) > 0 {
- if includeTags[tag] {
- return true
- }
- } else if len(excludeTags) > 0 {
- if excludeTags[tag] {
- return false
- }
- }
- }
- return len(includeTags) <= 0
-}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/scanner.go b/test/tools/vendor/github.com/go-swagger/go-swagger/scan/scanner.go
deleted file mode 100644
index b0761673501..00000000000
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/scanner.go
+++ /dev/null
@@ -1,974 +0,0 @@
-//go:build !go1.11
-// +build !go1.11
-
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package scan
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "go/ast"
- "go/build"
- goparser "go/parser"
- "go/types"
- "log"
- "os"
- "regexp"
- "strings"
-
- "github.com/go-openapi/loads/fmts"
- "github.com/go-openapi/spec"
- "github.com/go-openapi/swag"
- "golang.org/x/tools/go/loader"
- yaml "gopkg.in/yaml.v3"
-)
-
-const (
- rxMethod = "(\\p{L}+)"
- rxPath = "((?:/[\\p{L}\\p{N}\\p{Pd}\\p{Pc}{}\\-\\.\\?_~%!$&'()*+,;=:@/]*)+/?)"
- rxOpTags = "(\\p{L}[\\p{L}\\p{N}\\p{Pd}\\.\\p{Pc}\\p{Zs}]+)"
- rxOpID = "((?:\\p{L}[\\p{L}\\p{N}\\p{Pd}\\p{Pc}]+)+)"
-
- rxMaximumFmt = "%s[Mm]ax(?:imum)?\\p{Zs}*:\\p{Zs}*([\\<=])?\\p{Zs}*([\\+-]?(?:\\p{N}+\\.)?\\p{N}+)$"
- rxMinimumFmt = "%s[Mm]in(?:imum)?\\p{Zs}*:\\p{Zs}*([\\>=])?\\p{Zs}*([\\+-]?(?:\\p{N}+\\.)?\\p{N}+)$"
- rxMultipleOfFmt = "%s[Mm]ultiple\\p{Zs}*[Oo]f\\p{Zs}*:\\p{Zs}*([\\+-]?(?:\\p{N}+\\.)?\\p{N}+)$"
-
- rxMaxLengthFmt = "%s[Mm]ax(?:imum)?(?:\\p{Zs}*[\\p{Pd}\\p{Pc}]?[Ll]en(?:gth)?)\\p{Zs}*:\\p{Zs}*(\\p{N}+)$"
- rxMinLengthFmt = "%s[Mm]in(?:imum)?(?:\\p{Zs}*[\\p{Pd}\\p{Pc}]?[Ll]en(?:gth)?)\\p{Zs}*:\\p{Zs}*(\\p{N}+)$"
- rxPatternFmt = "%s[Pp]attern\\p{Zs}*:\\p{Zs}*(.*)$"
- rxCollectionFormatFmt = "%s[Cc]ollection(?:\\p{Zs}*[\\p{Pd}\\p{Pc}]?[Ff]ormat)\\p{Zs}*:\\p{Zs}*(.*)$"
- rxEnumFmt = "%s[Ee]num\\p{Zs}*:\\p{Zs}*(.*)$"
- rxDefaultFmt = "%s[Dd]efault\\p{Zs}*:\\p{Zs}*(.*)$"
- rxExampleFmt = "%s[Ee]xample\\p{Zs}*:\\p{Zs}*(.*)$"
-
- rxMaxItemsFmt = "%s[Mm]ax(?:imum)?(?:\\p{Zs}*|[\\p{Pd}\\p{Pc}]|\\.)?[Ii]tems\\p{Zs}*:\\p{Zs}*(\\p{N}+)$"
- rxMinItemsFmt = "%s[Mm]in(?:imum)?(?:\\p{Zs}*|[\\p{Pd}\\p{Pc}]|\\.)?[Ii]tems\\p{Zs}*:\\p{Zs}*(\\p{N}+)$"
- rxUniqueFmt = "%s[Uu]nique\\p{Zs}*:\\p{Zs}*(true|false)$"
-
- rxItemsPrefixFmt = "(?:[Ii]tems[\\.\\p{Zs}]*){%d}"
-)
-
-var (
- rxSwaggerAnnotation = regexp.MustCompile(`swagger:([\p{L}\p{N}\p{Pd}\p{Pc}]+)`)
- rxFileUpload = regexp.MustCompile(`swagger:file`)
- rxStrFmt = regexp.MustCompile(`swagger:strfmt\p{Zs}*(\p{L}[\p{L}\p{N}\p{Pd}\p{Pc}]+)$`)
- rxAlias = regexp.MustCompile(`swagger:alias`)
- rxName = regexp.MustCompile(`swagger:name\p{Zs}*(\p{L}[\p{L}\p{N}\p{Pd}\p{Pc}\.]+)$`)
- rxAllOf = regexp.MustCompile(`swagger:allOf\p{Zs}*(\p{L}[\p{L}\p{N}\p{Pd}\p{Pc}\.]+)?$`)
- rxModelOverride = regexp.MustCompile(`swagger:model\p{Zs}*(\p{L}[\p{L}\p{N}\p{Pd}\p{Pc}]+)?$`)
- rxResponseOverride = regexp.MustCompile(`swagger:response\p{Zs}*(\p{L}[\p{L}\p{N}\p{Pd}\p{Pc}]+)?$`)
- rxParametersOverride = regexp.MustCompile(`swagger:parameters\p{Zs}*(\p{L}[\p{L}\p{N}\p{Pd}\p{Pc}\p{Zs}]+)$`)
- rxEnum = regexp.MustCompile(`swagger:enum\p{Zs}*(\p{L}[\p{L}\p{N}\p{Pd}\p{Pc}]+)$`)
- rxIgnoreOverride = regexp.MustCompile(`swagger:ignore\p{Zs}*(\p{L}[\p{L}\p{N}\p{Pd}\p{Pc}]+)?$`)
- rxDefault = regexp.MustCompile(`swagger:default\p{Zs}*(\p{L}[\p{L}\p{N}\p{Pd}\p{Pc}]+)$`)
- rxType = regexp.MustCompile(`swagger:type\p{Zs}*(\p{L}[\p{L}\p{N}\p{Pd}\p{Pc}]+)$`)
- rxRoute = regexp.MustCompile(
- "swagger:route\\p{Zs}*" +
- rxMethod +
- "\\p{Zs}*" +
- rxPath +
- "(?:\\p{Zs}+" +
- rxOpTags +
- ")?\\p{Zs}+" +
- rxOpID + "\\p{Zs}*$")
- rxBeginYAMLSpec = regexp.MustCompile(`---\p{Zs}*$`)
- rxUncommentHeaders = regexp.MustCompile(`^[\p{Zs}\t/\*-]*\|?`)
- rxUncommentYAML = regexp.MustCompile(`^[\p{Zs}\t]*/*`)
- rxOperation = regexp.MustCompile(
- "swagger:operation\\p{Zs}*" +
- rxMethod +
- "\\p{Zs}*" +
- rxPath +
- "(?:\\p{Zs}+" +
- rxOpTags +
- ")?\\p{Zs}+" +
- rxOpID + "\\p{Zs}*$")
-
- rxSpace = regexp.MustCompile(`\p{Zs}+`)
- rxIndent = regexp.MustCompile(`\p{Zs}*/*\p{Zs}*[^\p{Zs}]`)
- rxPunctuationEnd = regexp.MustCompile(`\p{Po}$`)
- rxStripComments = regexp.MustCompile(`^[^\p{L}\p{N}\p{Pd}\p{Pc}\+]*`)
- rxStripTitleComments = regexp.MustCompile(`^[^\p{L}]*[Pp]ackage\p{Zs}+[^\p{Zs}]+\p{Zs}*`)
- rxAllowedExtensions = regexp.MustCompile(`^[Xx]-`)
-
- rxIn = regexp.MustCompile(`[Ii]n\p{Zs}*:\p{Zs}*(query|path|header|body|formData)$`)
- rxRequired = regexp.MustCompile(`[Rr]equired\p{Zs}*:\p{Zs}*(true|false)$`)
- rxDiscriminator = regexp.MustCompile(`[Dd]iscriminator\p{Zs}*:\p{Zs}*(true|false)$`)
- rxReadOnly = regexp.MustCompile(`[Rr]ead(?:\p{Zs}*|[\p{Pd}\p{Pc}])?[Oo]nly\p{Zs}*:\p{Zs}*(true|false)$`)
- rxConsumes = regexp.MustCompile(`[Cc]onsumes\p{Zs}*:`)
- rxProduces = regexp.MustCompile(`[Pp]roduces\p{Zs}*:`)
- rxSecuritySchemes = regexp.MustCompile(`[Ss]ecurity\p{Zs}*:`)
- rxSecurity = regexp.MustCompile(`[Ss]ecurity\p{Zs}*[Dd]efinitions:`)
- rxResponses = regexp.MustCompile(`[Rr]esponses\p{Zs}*:`)
- rxParameters = regexp.MustCompile(`[Pp]arameters\p{Zs}*:`)
- rxSchemes = regexp.MustCompile(`[Ss]chemes\p{Zs}*:\p{Zs}*((?:(?:https?|HTTPS?|wss?|WSS?)[\p{Zs},]*)+)$`)
- rxVersion = regexp.MustCompile(`[Vv]ersion\p{Zs}*:\p{Zs}*(.+)$`)
- rxHost = regexp.MustCompile(`[Hh]ost\p{Zs}*:\p{Zs}*(.+)$`)
- rxBasePath = regexp.MustCompile(`[Bb]ase\p{Zs}*-*[Pp]ath\p{Zs}*:\p{Zs}*` + rxPath + "$")
- rxLicense = regexp.MustCompile(`[Ll]icense\p{Zs}*:\p{Zs}*(.+)$`)
- rxContact = regexp.MustCompile(`[Cc]ontact\p{Zs}*-?(?:[Ii]info\p{Zs}*)?:\p{Zs}*(.+)$`)
- rxTOS = regexp.MustCompile(`[Tt](:?erms)?\p{Zs}*-?[Oo]f?\p{Zs}*-?[Ss](?:ervice)?\p{Zs}*:`)
- rxExtensions = regexp.MustCompile(`[Ee]xtensions\p{Zs}*:`)
- rxInfoExtensions = regexp.MustCompile(`[In]nfo\p{Zs}*[Ee]xtensions:`)
- // currently unused: rxExample = regexp.MustCompile(`[Ex]ample\p{Zs}*:\p{Zs}*(.*)$`)
-)
-
-// Many thanks go to https://github.com/yvasiyarov/swagger
-// this is loosely based on that implementation but for swagger 2.0
-
-func joinDropLast(lines []string) string {
- l := len(lines)
- lns := lines
- if l > 0 && len(strings.TrimSpace(lines[l-1])) == 0 {
- lns = lines[:l-1]
- }
- return strings.Join(lns, "\n")
-}
-
-func removeEmptyLines(lines []string) (notEmpty []string) {
- for _, l := range lines {
- if len(strings.TrimSpace(l)) > 0 {
- notEmpty = append(notEmpty, l)
- }
- }
- return
-}
-
-func rxf(rxp, ar string) *regexp.Regexp {
- return regexp.MustCompile(fmt.Sprintf(rxp, ar))
-}
-
-// The Opts for the application scanner.
-type Opts struct {
- BasePath string
- Input *spec.Swagger
- ScanModels bool
- BuildTags string
- Include []string
- Exclude []string
- IncludeTags []string
- ExcludeTags []string
-}
-
-func safeConvert(str string) bool {
- b, err := swag.ConvertBool(str)
- if err != nil {
- return false
- }
- return b
-}
-
-// Debug is true when process is run with DEBUG=1 env var
-var Debug = safeConvert(os.Getenv("DEBUG"))
-
-// Application scans the application and builds a swagger spec based on the information from the code files.
-// When there are includes provided, only those files are considered for the initial discovery.
-// Similarly the excludes will exclude an item from initial discovery through scanning for annotations.
-// When something in the discovered items requires a type that is contained in the includes or excludes it will still be
-// in the spec.
-func Application(opts Opts) (*spec.Swagger, error) {
- parser, err := newAppScanner(&opts)
-
- if err != nil {
- return nil, err
- }
- return parser.Parse()
-}
-
-// appScanner the global context for scanning a go application
-// into a swagger specification
-type appScanner struct {
- loader *loader.Config
- prog *loader.Program
- classifier *programClassifier
- discovered []schemaDecl
- input *spec.Swagger
- definitions map[string]spec.Schema
- responses map[string]spec.Response
- operations map[string]*spec.Operation
- scanModels bool
- includeTags map[string]bool
- excludeTas map[string]bool
-
- // MainPackage the path to find the main class in
- MainPackage string
-}
-
-// newAppScanner creates a new api parser
-func newAppScanner(opts *Opts) (*appScanner, error) {
- if Debug {
- log.Println("scanning packages discovered through entrypoint @ ", opts.BasePath)
- }
- var ldr loader.Config
- ldr.ParserMode = goparser.ParseComments
- ldr.Import(opts.BasePath)
- if opts.BuildTags != "" {
- ldr.Build = &build.Default
- ldr.Build.BuildTags = strings.Split(opts.BuildTags, ",")
- }
- ldr.TypeChecker = types.Config{FakeImportC: true}
- prog, err := ldr.Load()
- if err != nil {
- return nil, err
- }
-
- var includes, excludes packageFilters
- if len(opts.Include) > 0 {
- for _, include := range opts.Include {
- includes = append(includes, packageFilter{Name: include})
- }
- }
- if len(opts.Exclude) > 0 {
- for _, exclude := range opts.Exclude {
- excludes = append(excludes, packageFilter{Name: exclude})
- }
- }
- includeTags := make(map[string]bool)
- for _, includeTag := range opts.IncludeTags {
- includeTags[includeTag] = true
- }
- excludeTags := make(map[string]bool)
- for _, excludeTag := range opts.ExcludeTags {
- excludeTags[excludeTag] = true
- }
-
- input := opts.Input
- if input == nil {
- input = new(spec.Swagger)
- input.Swagger = "2.0"
- }
-
- if input.Paths == nil {
- input.Paths = new(spec.Paths)
- }
- if input.Definitions == nil {
- input.Definitions = make(map[string]spec.Schema)
- }
- if input.Responses == nil {
- input.Responses = make(map[string]spec.Response)
- }
- if input.Extensions == nil {
- input.Extensions = make(spec.Extensions)
- }
-
- return &appScanner{
- MainPackage: opts.BasePath,
- prog: prog,
- input: input,
- loader: &ldr,
- operations: collectOperationsFromInput(input),
- definitions: input.Definitions,
- responses: input.Responses,
- scanModels: opts.ScanModels,
- classifier: &programClassifier{
- Includes: includes,
- Excludes: excludes,
- },
- includeTags: includeTags,
- excludeTas: excludeTags,
- }, nil
-}
-
-func collectOperationsFromInput(input *spec.Swagger) map[string]*spec.Operation {
- operations := make(map[string]*spec.Operation)
- if input != nil && input.Paths != nil {
- for _, pth := range input.Paths.Paths {
- if pth.Get != nil {
- operations[pth.Get.ID] = pth.Get
- }
- if pth.Post != nil {
- operations[pth.Post.ID] = pth.Post
- }
- if pth.Put != nil {
- operations[pth.Put.ID] = pth.Put
- }
- if pth.Patch != nil {
- operations[pth.Patch.ID] = pth.Patch
- }
- if pth.Delete != nil {
- operations[pth.Delete.ID] = pth.Delete
- }
- if pth.Head != nil {
- operations[pth.Head.ID] = pth.Head
- }
- if pth.Options != nil {
- operations[pth.Options.ID] = pth.Options
- }
- }
- }
- return operations
-}
-
-// Parse produces a swagger object for an application
-func (a *appScanner) Parse() (*spec.Swagger, error) {
- // classification still includes files that are completely commented out
- cp, err := a.classifier.Classify(a.prog)
- if err != nil {
- return nil, err
- }
-
- // build models dictionary
- if a.scanModels {
- for _, modelsFile := range cp.Models {
- if err := a.parseSchema(modelsFile); err != nil {
- return nil, err
- }
- }
- }
-
- // build parameters dictionary
- for _, paramsFile := range cp.Parameters {
- if err := a.parseParameters(paramsFile); err != nil {
- return nil, err
- }
- }
-
- // build responses dictionary
- for _, responseFile := range cp.Responses {
- if err := a.parseResponses(responseFile); err != nil {
- return nil, err
- }
- }
-
- // build definitions dictionary
- if err := a.processDiscovered(); err != nil {
- return nil, err
- }
-
- // build paths dictionary
- for _, routeFile := range cp.Routes {
- if err := a.parseRoutes(routeFile); err != nil {
- return nil, err
- }
- }
- for _, operationFile := range cp.Operations {
- if err := a.parseOperations(operationFile); err != nil {
- return nil, err
- }
- }
-
- // build swagger object
- for _, metaFile := range cp.Meta {
- if err := a.parseMeta(metaFile); err != nil {
- return nil, err
- }
- }
-
- if a.input.Swagger == "" {
- a.input.Swagger = "2.0"
- }
-
- return a.input, nil
-}
-
-func (a *appScanner) processDiscovered() error {
- // loop over discovered until all the items are in definitions
- keepGoing := len(a.discovered) > 0
- for keepGoing {
- var queue []schemaDecl
- for _, d := range a.discovered {
- if _, ok := a.definitions[d.Name]; !ok {
- queue = append(queue, d)
- }
- }
- a.discovered = nil
- for _, sd := range queue {
- if err := a.parseDiscoveredSchema(sd); err != nil {
- return err
- }
- }
- keepGoing = len(a.discovered) > 0
- }
-
- return nil
-}
-
-func (a *appScanner) parseSchema(file *ast.File) error {
- sp := newSchemaParser(a.prog)
- if err := sp.Parse(file, a.definitions); err != nil {
- return err
- }
- a.discovered = append(a.discovered, sp.postDecls...)
- return nil
-}
-
-func (a *appScanner) parseDiscoveredSchema(sd schemaDecl) error {
- sp := newSchemaParser(a.prog)
- sp.discovered = &sd
-
- if err := sp.Parse(sd.File, a.definitions); err != nil {
- return err
- }
- a.discovered = append(a.discovered, sp.postDecls...)
- return nil
-}
-
-func (a *appScanner) parseRoutes(file *ast.File) error {
- rp := newRoutesParser(a.prog)
- rp.operations = a.operations
- rp.definitions = a.definitions
- rp.responses = a.responses
-
- return rp.Parse(file, a.input.Paths, a.includeTags, a.excludeTas)
-}
-
-func (a *appScanner) parseOperations(file *ast.File) error {
- op := newOperationsParser(a.prog)
- op.operations = a.operations
- op.definitions = a.definitions
- op.responses = a.responses
- return op.Parse(file, a.input.Paths, a.includeTags, a.excludeTas)
-}
-
-func (a *appScanner) parseParameters(file *ast.File) error {
- rp := newParameterParser(a.prog)
- if err := rp.Parse(file, a.operations); err != nil {
- return err
- }
- a.discovered = append(a.discovered, rp.postDecls...)
- a.discovered = append(a.discovered, rp.scp.postDecls...)
- return nil
-}
-
-func (a *appScanner) parseResponses(file *ast.File) error {
- rp := newResponseParser(a.prog)
- if err := rp.Parse(file, a.responses); err != nil {
- return err
- }
- a.discovered = append(a.discovered, rp.postDecls...)
- a.discovered = append(a.discovered, rp.scp.postDecls...)
- return nil
-}
-
-func (a *appScanner) parseMeta(file *ast.File) error {
- return newMetaParser(a.input).Parse(file.Doc)
-}
-
-// MustExpandPackagePath gets the real package path on disk
-func (a *appScanner) MustExpandPackagePath(packagePath string) string {
- pkgRealpath := swag.FindInGoSearchPath(packagePath)
- if pkgRealpath == "" {
- log.Fatalf("Can't find package %s \n", packagePath)
- }
-
- return pkgRealpath
-}
-
-type swaggerTypable interface {
- Typed(string, string)
- SetRef(spec.Ref)
- Items() swaggerTypable
- WithEnum(...interface{})
- Schema() *spec.Schema
- Level() int
-}
-
-// Map all Go builtin types that have Json representation to Swagger/Json types.
-// See https://golang.org/pkg/builtin/ and http://swagger.io/specification/
-func swaggerSchemaForType(typeName string, prop swaggerTypable) error {
- switch typeName {
- case "bool":
- prop.Typed("boolean", "")
- case "byte":
- prop.Typed("integer", "uint8")
- case "complex128", "complex64":
- return fmt.Errorf("unsupported builtin %q (no JSON marshaller)", typeName)
- case "error":
- // TODO: error is often marshalled into a string but not always (e.g. errors package creates
- // errors that are marshalled into an empty object), this could be handled the same way
- // custom JSON marshallers are handled (in future)
- prop.Typed("string", "")
- case "float32":
- prop.Typed("number", "float")
- case "float64":
- prop.Typed("number", "double")
- case "int":
- prop.Typed("integer", "int64")
- case "int16":
- prop.Typed("integer", "int16")
- case "int32":
- prop.Typed("integer", "int32")
- case "int64":
- prop.Typed("integer", "int64")
- case "int8":
- prop.Typed("integer", "int8")
- case "rune":
- prop.Typed("integer", "int32")
- case "string":
- prop.Typed("string", "")
- case "uint":
- prop.Typed("integer", "uint64")
- case "uint16":
- prop.Typed("integer", "uint16")
- case "uint32":
- prop.Typed("integer", "uint32")
- case "uint64":
- prop.Typed("integer", "uint64")
- case "uint8":
- prop.Typed("integer", "uint8")
- case "uintptr":
- prop.Typed("integer", "uint64")
- default:
- return fmt.Errorf("unsupported type %q", typeName)
- }
- return nil
-}
-
-func newMultiLineTagParser(name string, parser valueParser, skipCleanUp bool) tagParser {
- return tagParser{
- Name: name,
- MultiLine: true,
- SkipCleanUp: skipCleanUp,
- Parser: parser,
- }
-}
-
-func newSingleLineTagParser(name string, parser valueParser) tagParser {
- return tagParser{
- Name: name,
- MultiLine: false,
- SkipCleanUp: false,
- Parser: parser,
- }
-}
-
-type tagParser struct {
- Name string
- MultiLine bool
- SkipCleanUp bool
- Lines []string
- Parser valueParser
-}
-
-func (st *tagParser) Matches(line string) bool {
- return st.Parser.Matches(line)
-}
-
-func (st *tagParser) Parse(lines []string) error {
- return st.Parser.Parse(lines)
-}
-
-func newYamlParser(rx *regexp.Regexp, setter func(json.RawMessage) error) valueParser {
- return &yamlParser{
- set: setter,
- rx: rx,
- }
-}
-
-type yamlParser struct {
- set func(json.RawMessage) error
- rx *regexp.Regexp
-}
-
-func (y *yamlParser) Parse(lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
-
- var uncommented []string
- uncommented = append(uncommented, removeYamlIndent(lines)...)
-
- yamlContent := strings.Join(uncommented, "\n")
- var yamlValue interface{}
- err := yaml.Unmarshal([]byte(yamlContent), &yamlValue)
- if err != nil {
- return err
- }
-
- var jsonValue json.RawMessage
- jsonValue, err = fmts.YAMLToJSON(yamlValue)
- if err != nil {
- return err
- }
-
- return y.set(jsonValue)
-}
-
-func (y *yamlParser) Matches(line string) bool {
- return y.rx.MatchString(line)
-}
-
-// aggregates lines in header until it sees `---`,
-// the beginning of a YAML spec
-type yamlSpecScanner struct {
- header []string
- yamlSpec []string
- setTitle func([]string)
- setDescription func([]string)
- workedOutTitle bool
- title []string
- skipHeader bool
-}
-
-func cleanupScannerLines(lines []string, ur *regexp.Regexp, yamlBlock *regexp.Regexp) []string {
- // bail early when there is nothing to parse
- if len(lines) == 0 {
- return lines
- }
- seenLine := -1
- var lastContent int
- var uncommented []string
- var startBlock bool
- var yaml []string
- for i, v := range lines {
- if yamlBlock != nil && yamlBlock.MatchString(v) && !startBlock {
- startBlock = true
- if seenLine < 0 {
- seenLine = i
- }
- continue
- }
- if startBlock {
- if yamlBlock.MatchString(v) {
- startBlock = false
- uncommented = append(uncommented, removeIndent(yaml)...)
- continue
- }
- yaml = append(yaml, v)
- if v != "" {
- if seenLine < 0 {
- seenLine = i
- }
- lastContent = i
- }
- continue
- }
- str := ur.ReplaceAllString(v, "")
- uncommented = append(uncommented, str)
- if str != "" {
- if seenLine < 0 {
- seenLine = i
- }
- lastContent = i
- }
- }
-
- // fixes issue #50
- if seenLine == -1 {
- return nil
- }
- return uncommented[seenLine : lastContent+1]
-}
-
-// a shared function that can be used to split given headers
-// into a title and description
-func collectScannerTitleDescription(headers []string) (title, desc []string) {
- hdrs := cleanupScannerLines(headers, rxUncommentHeaders, nil)
-
- idx := -1
- for i, line := range hdrs {
- if strings.TrimSpace(line) == "" {
- idx = i
- break
- }
- }
-
- if idx > -1 {
- title = hdrs[:idx]
- if len(hdrs) > idx+1 {
- desc = hdrs[idx+1:]
- } else {
- desc = nil
- }
- return
- }
-
- if len(hdrs) > 0 {
- line := hdrs[0]
- if rxPunctuationEnd.MatchString(line) {
- title = []string{line}
- desc = hdrs[1:]
- } else {
- desc = hdrs
- }
- }
-
- return
-}
-
-func (sp *yamlSpecScanner) collectTitleDescription() {
- if sp.workedOutTitle {
- return
- }
- if sp.setTitle == nil {
- sp.header = cleanupScannerLines(sp.header, rxUncommentHeaders, nil)
- return
- }
-
- sp.workedOutTitle = true
- sp.title, sp.header = collectScannerTitleDescription(sp.header)
-}
-
-func (sp *yamlSpecScanner) Title() []string {
- sp.collectTitleDescription()
- return sp.title
-}
-
-func (sp *yamlSpecScanner) Description() []string {
- sp.collectTitleDescription()
- return sp.header
-}
-
-func (sp *yamlSpecScanner) Parse(doc *ast.CommentGroup) error {
- if doc == nil {
- return nil
- }
- var startedYAMLSpec bool
-COMMENTS:
- for _, c := range doc.List {
- for _, line := range strings.Split(c.Text, "\n") {
- if rxSwaggerAnnotation.MatchString(line) {
- break COMMENTS // a new swagger: annotation terminates this parser
- }
-
- if !startedYAMLSpec {
- if rxBeginYAMLSpec.MatchString(line) {
- startedYAMLSpec = true
- sp.yamlSpec = append(sp.yamlSpec, line)
- continue
- }
-
- if !sp.skipHeader {
- sp.header = append(sp.header, line)
- }
-
- // no YAML spec yet, moving on
- continue
- }
-
- sp.yamlSpec = append(sp.yamlSpec, line)
- }
- }
- if sp.setTitle != nil {
- sp.setTitle(sp.Title())
- }
- if sp.setDescription != nil {
- sp.setDescription(sp.Description())
- }
- return nil
-}
-
-func (sp *yamlSpecScanner) UnmarshalSpec(u func([]byte) error) (err error) {
- spec := cleanupScannerLines(sp.yamlSpec, rxUncommentYAML, nil)
- if len(spec) == 0 {
- return errors.New("no spec available to unmarshal")
- }
-
- if !strings.Contains(spec[0], "---") {
- return errors.New("yaml spec has to start with `---`")
- }
-
- // remove indentation
- spec = removeIndent(spec)
-
- // 1. parse yaml lines
- yamlValue := make(map[interface{}]interface{})
-
- yamlContent := strings.Join(spec, "\n")
- err = yaml.Unmarshal([]byte(yamlContent), &yamlValue)
- if err != nil {
- return
- }
-
- // 2. convert to json
- var jsonValue json.RawMessage
- jsonValue, err = fmts.YAMLToJSON(yamlValue)
- if err != nil {
- return
- }
-
- // 3. unmarshal the json into an interface
- var data []byte
- data, err = jsonValue.MarshalJSON()
- if err != nil {
- return
- }
- err = u(data)
- if err != nil {
- return
- }
-
- // all parsed, returning...
- sp.yamlSpec = nil // spec is now consumed, so let's erase the parsed lines
- return
-}
-
-// removes indent base on the first line
-func removeIndent(spec []string) []string {
- loc := rxIndent.FindStringIndex(spec[0])
- if loc[1] > 0 {
- for i := range spec {
- if len(spec[i]) >= loc[1] {
- spec[i] = spec[i][loc[1]-1:]
- }
- }
- }
- return spec
-}
-
-// removes indent base on the first line
-func removeYamlIndent(spec []string) []string {
- loc := rxIndent.FindStringIndex(spec[0])
- var s []string
- if loc[1] > 0 {
- for i := range spec {
- if len(spec[i]) >= loc[1] {
- s = append(s, spec[i][loc[1]-1:])
- }
- }
- }
- return s
-}
-
-// aggregates lines in header until it sees a tag.
-type sectionedParser struct {
- header []string
- matched map[string]tagParser
- annotation valueParser
-
- seenTag bool
- skipHeader bool
- setTitle func([]string)
- setDescription func([]string)
- workedOutTitle bool
- taggers []tagParser
- currentTagger *tagParser
- title []string
- ignored bool
-}
-
-func (st *sectionedParser) collectTitleDescription() {
- if st.workedOutTitle {
- return
- }
- if st.setTitle == nil {
- st.header = cleanupScannerLines(st.header, rxUncommentHeaders, nil)
- return
- }
-
- st.workedOutTitle = true
- st.title, st.header = collectScannerTitleDescription(st.header)
-}
-
-func (st *sectionedParser) Title() []string {
- st.collectTitleDescription()
- return st.title
-}
-
-func (st *sectionedParser) Description() []string {
- st.collectTitleDescription()
- return st.header
-}
-
-func (st *sectionedParser) Parse(doc *ast.CommentGroup) error {
- if doc == nil {
- return nil
- }
-COMMENTS:
- for _, c := range doc.List {
- for _, line := range strings.Split(c.Text, "\n") {
- if rxSwaggerAnnotation.MatchString(line) {
- if rxIgnoreOverride.MatchString(line) {
- st.ignored = true
- break COMMENTS // an explicit ignore terminates this parser
- }
- if st.annotation == nil || !st.annotation.Matches(line) {
- break COMMENTS // a new swagger: annotation terminates this parser
- }
-
- _ = st.annotation.Parse([]string{line})
- if len(st.header) > 0 {
- st.seenTag = true
- }
- continue
- }
-
- var matched bool
- for _, tagger := range st.taggers {
- if tagger.Matches(line) {
- st.seenTag = true
- st.currentTagger = &tagger
- matched = true
- break
- }
- }
-
- if st.currentTagger == nil {
- if !st.skipHeader && !st.seenTag {
- st.header = append(st.header, line)
- }
- // didn't match a tag, moving on
- continue
- }
-
- if st.currentTagger.MultiLine && matched {
- // the first line of a multiline tagger doesn't count
- continue
- }
-
- ts, ok := st.matched[st.currentTagger.Name]
- if !ok {
- ts = *st.currentTagger
- }
- ts.Lines = append(ts.Lines, line)
- if st.matched == nil {
- st.matched = make(map[string]tagParser)
- }
- st.matched[st.currentTagger.Name] = ts
-
- if !st.currentTagger.MultiLine {
- st.currentTagger = nil
- }
- }
- }
- if st.setTitle != nil {
- st.setTitle(st.Title())
- }
- if st.setDescription != nil {
- st.setDescription(st.Description())
- }
- for _, mt := range st.matched {
- if !mt.SkipCleanUp {
- mt.Lines = cleanupScannerLines(mt.Lines, rxUncommentHeaders, nil)
- }
- if err := mt.Parse(mt.Lines); err != nil {
- return err
- }
- }
- return nil
-}
-
-type vendorExtensibleParser struct {
- setExtensions func(ext spec.Extensions, dest interface{})
-}
-
-func (extParser vendorExtensibleParser) ParseInto(dest interface{}) func(json.RawMessage) error {
- return func(jsonValue json.RawMessage) error {
- var jsonData spec.Extensions
- err := json.Unmarshal(jsonValue, &jsonData)
- if err != nil {
- return err
- }
- for k := range jsonData {
- if !rxAllowedExtensions.MatchString(k) {
- return fmt.Errorf("invalid schema extension name, should start from `x-`: %s", k)
- }
- }
- extParser.setExtensions(jsonData, dest)
- return nil
- }
-}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/schema.go b/test/tools/vendor/github.com/go-swagger/go-swagger/scan/schema.go
deleted file mode 100644
index 37ce6cf25db..00000000000
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/schema.go
+++ /dev/null
@@ -1,1358 +0,0 @@
-//go:build !go1.11
-// +build !go1.11
-
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package scan
-
-import (
- "fmt"
- "go/ast"
- "log"
- "os"
- "os/exec"
- "path/filepath"
- "reflect"
- "strconv"
- "strings"
-
- "golang.org/x/tools/go/loader"
-
- "github.com/go-openapi/spec"
-)
-
-func addExtension(ve *spec.VendorExtensible, key string, value interface{}) {
- if os.Getenv("SWAGGER_GENERATE_EXTENSION") == "false" {
- return
- }
-
- ve.AddExtension(key, value)
-}
-
-type schemaTypable struct {
- schema *spec.Schema
- level int
-}
-
-func (st schemaTypable) Typed(tpe, format string) {
- st.schema.Typed(tpe, format)
-}
-
-func (st schemaTypable) SetRef(ref spec.Ref) {
- st.schema.Ref = ref
-}
-
-func (st schemaTypable) Schema() *spec.Schema {
- return st.schema
-}
-
-func (st schemaTypable) Items() swaggerTypable {
- if st.schema.Items == nil {
- st.schema.Items = new(spec.SchemaOrArray)
- }
- if st.schema.Items.Schema == nil {
- st.schema.Items.Schema = new(spec.Schema)
- }
-
- st.schema.Typed("array", "")
- return schemaTypable{st.schema.Items.Schema, st.level + 1}
-}
-
-func (st schemaTypable) AdditionalProperties() swaggerTypable {
- if st.schema.AdditionalProperties == nil {
- st.schema.AdditionalProperties = new(spec.SchemaOrBool)
- }
- if st.schema.AdditionalProperties.Schema == nil {
- st.schema.AdditionalProperties.Schema = new(spec.Schema)
- }
-
- st.schema.Typed("object", "")
- return schemaTypable{st.schema.AdditionalProperties.Schema, st.level + 1}
-}
-
-func (st schemaTypable) Level() int { return st.level }
-
-func (st schemaTypable) WithEnum(values ...interface{}) {
- st.schema.WithEnum(values...)
-}
-
-type schemaValidations struct {
- current *spec.Schema
-}
-
-func (sv schemaValidations) SetMaximum(val float64, exclusive bool) {
- sv.current.Maximum = &val
- sv.current.ExclusiveMaximum = exclusive
-}
-func (sv schemaValidations) SetMinimum(val float64, exclusive bool) {
- sv.current.Minimum = &val
- sv.current.ExclusiveMinimum = exclusive
-}
-func (sv schemaValidations) SetMultipleOf(val float64) { sv.current.MultipleOf = &val }
-func (sv schemaValidations) SetMinItems(val int64) { sv.current.MinItems = &val }
-func (sv schemaValidations) SetMaxItems(val int64) { sv.current.MaxItems = &val }
-func (sv schemaValidations) SetMinLength(val int64) { sv.current.MinLength = &val }
-func (sv schemaValidations) SetMaxLength(val int64) { sv.current.MaxLength = &val }
-func (sv schemaValidations) SetPattern(val string) { sv.current.Pattern = val }
-func (sv schemaValidations) SetUnique(val bool) { sv.current.UniqueItems = val }
-func (sv schemaValidations) SetDefault(val interface{}) { sv.current.Default = val }
-func (sv schemaValidations) SetExample(val interface{}) { sv.current.Example = val }
-func (sv schemaValidations) SetEnum(val string) {
- sv.current.Enum = parseEnum(val, &spec.SimpleSchema{Format: sv.current.Format, Type: sv.current.Type[0]})
-}
-
-type schemaDecl struct {
- File *ast.File
- Decl *ast.GenDecl
- TypeSpec *ast.TypeSpec
- GoName string
- Name string
- annotated bool
-}
-
-func newSchemaDecl(file *ast.File, decl *ast.GenDecl, ts *ast.TypeSpec) *schemaDecl {
- sd := &schemaDecl{
- File: file,
- Decl: decl,
- TypeSpec: ts,
- }
- sd.inferNames()
- return sd
-}
-
-func (sd *schemaDecl) hasAnnotation() bool {
- sd.inferNames()
- return sd.annotated
-}
-
-func (sd *schemaDecl) inferNames() (goName string, name string) {
- if sd.GoName != "" {
- goName, name = sd.GoName, sd.Name
- return
- }
- goName = sd.TypeSpec.Name.Name
- name = goName
- if sd.Decl.Doc != nil {
- DECLS:
- for _, cmt := range sd.Decl.Doc.List {
- for _, ln := range strings.Split(cmt.Text, "\n") {
- matches := rxModelOverride.FindStringSubmatch(ln)
- if len(matches) > 0 {
- sd.annotated = true
- }
- if len(matches) > 1 && len(matches[1]) > 0 {
- name = matches[1]
- break DECLS
- }
- }
- }
- }
- sd.GoName = goName
- sd.Name = name
- return
-}
-
-type schemaParser struct {
- program *loader.Program
- postDecls []schemaDecl
- known map[string]spec.Schema
- discovered *schemaDecl
-}
-
-func newSchemaParser(prog *loader.Program) *schemaParser {
- scp := new(schemaParser)
- scp.program = prog
- scp.known = make(map[string]spec.Schema)
- return scp
-}
-
-func (scp *schemaParser) Parse(gofile *ast.File, target interface{}) error {
- tgt := target.(map[string]spec.Schema)
- for _, decl := range gofile.Decls {
- gd, ok := decl.(*ast.GenDecl)
- if !ok {
- continue
- }
- for _, spc := range gd.Specs {
- if ts, ok := spc.(*ast.TypeSpec); ok {
- sd := newSchemaDecl(gofile, gd, ts)
- if err := scp.parseDecl(tgt, sd); err != nil {
- return err
- }
- }
- }
- }
- return nil
-}
-
-func (scp *schemaParser) parseDecl(definitions map[string]spec.Schema, decl *schemaDecl) error {
- // check if there is a swagger:model tag that is followed by a word,
- // this word is the type name for swagger
- // the package and type are recorded in the extensions
- // once type name is found convert it to a schema, by looking up the schema in the
- // definitions dictionary that got passed into this parse method
-
- // if our schemaParser is parsing a discovered schemaDecl and it does not match
- // the current schemaDecl we can skip parsing.
- if scp.discovered != nil && scp.discovered.Name != decl.Name {
- return nil
- }
-
- decl.inferNames()
- schema := definitions[decl.Name]
- schPtr := &schema
-
- // analyze doc comment for the model
- sp := new(sectionedParser)
- sp.setTitle = func(lines []string) { schema.Title = joinDropLast(lines) }
- sp.setDescription = func(lines []string) { schema.Description = joinDropLast(lines) }
- if err := sp.Parse(decl.Decl.Doc); err != nil {
- return err
- }
-
- // if the type is marked to ignore, just return
- if sp.ignored {
- return nil
- }
-
- // analyze struct body for fields etc
- // each exported struct field:
- // * gets a type mapped to a go primitive
- // * perhaps gets a format
- // * has to document the validations that apply for the type and the field
- // * when the struct field points to a model it becomes a ref: #/definitions/ModelName
- // * the first line of the comment is the title
- // * the following lines are the description
- switch tpe := decl.TypeSpec.Type.(type) {
- case *ast.StructType:
- if err := scp.parseStructType(decl.File, schPtr, tpe, make(map[string]string)); err != nil {
- return err
- }
- case *ast.InterfaceType:
- if err := scp.parseInterfaceType(decl.File, schPtr, tpe, make(map[string]string)); err != nil {
- return err
- }
- case *ast.Ident:
- prop := &schemaTypable{schPtr, 0}
- if strfmtName, ok := strfmtName(decl.Decl.Doc); ok {
- prop.Typed("string", strfmtName)
- } else {
- if err := scp.parseNamedType(decl.File, tpe, prop); err != nil {
- return err
- }
- }
- if enumName, ok := enumName(decl.Decl.Doc); ok {
- var enumValues = getEnumValues(decl.File, enumName)
- if len(enumValues) > 0 {
- var typeName = reflect.TypeOf(enumValues[0]).String()
- prop.WithEnum(enumValues...)
-
- err := swaggerSchemaForType(typeName, prop)
- if err != nil {
- return fmt.Errorf("file %s, error is: %v", decl.File.Name, err)
- }
- }
- }
- case *ast.SelectorExpr:
- prop := &schemaTypable{schPtr, 0}
- if strfmtName, ok := strfmtName(decl.Decl.Doc); ok {
- prop.Typed("string", strfmtName)
- } else {
- if err := scp.parseNamedType(decl.File, tpe, prop); err != nil {
- return err
- }
- }
-
- case *ast.ArrayType:
- prop := &schemaTypable{schPtr, 0}
- if strfmtName, ok := strfmtName(decl.Decl.Doc); ok {
- prop.Items().Typed("string", strfmtName)
- } else {
- if err := scp.parseNamedType(decl.File, tpe, &schemaTypable{schPtr, 0}); err != nil {
- return err
- }
- }
-
- case *ast.MapType:
- prop := &schemaTypable{schPtr, 0}
- if strfmtName, ok := strfmtName(decl.Decl.Doc); ok {
- prop.AdditionalProperties().Typed("string", strfmtName)
- } else {
- if err := scp.parseNamedType(decl.File, tpe, &schemaTypable{schPtr, 0}); err != nil {
- return err
- }
- }
- default:
- log.Printf("WARNING: Missing parser for a %T, skipping model: %s\n", tpe, decl.Name)
- return nil
- }
-
- if schPtr.Ref.String() == "" {
- if decl.Name != decl.GoName {
- addExtension(&schPtr.VendorExtensible, "x-go-name", decl.GoName)
- }
- for _, pkgInfo := range scp.program.AllPackages {
- if pkgInfo.Importable {
- for _, fil := range pkgInfo.Files {
- if fil.Pos() == decl.File.Pos() {
- addExtension(&schPtr.VendorExtensible, "x-go-package", pkgInfo.Pkg.Path())
- }
- }
- }
- }
- }
- definitions[decl.Name] = schema
- return nil
-}
-
-func (scp *schemaParser) parseNamedType(gofile *ast.File, expr ast.Expr, prop swaggerTypable) error {
- switch ftpe := expr.(type) {
- case *ast.Ident: // simple value
- pkg, err := scp.packageForFile(gofile, ftpe)
- if err != nil {
- return err
- }
- return scp.parseIdentProperty(pkg, ftpe, prop)
-
- case *ast.StarExpr: // pointer to something, optional by default
- if err := scp.parseNamedType(gofile, ftpe.X, prop); err != nil {
- return err
- }
-
- case *ast.ArrayType: // slice type
- if err := scp.parseNamedType(gofile, ftpe.Elt, prop.Items()); err != nil {
- return err
- }
-
- case *ast.StructType:
- schema := prop.Schema()
- if schema == nil {
- return fmt.Errorf("items doesn't support embedded structs")
- }
- return scp.parseStructType(gofile, prop.Schema(), ftpe, make(map[string]string))
-
- case *ast.SelectorExpr:
- err := scp.typeForSelector(gofile, ftpe, prop)
- return err
-
- case *ast.MapType:
- // check if key is a string type, if not print a message
- // and skip the map property. Only maps with string keys can go into additional properties
- sch := prop.Schema()
- if sch == nil {
- return fmt.Errorf("items doesn't support maps")
- }
- if keyIdent, ok := ftpe.Key.(*ast.Ident); sch != nil && ok {
- if keyIdent.Name == "string" {
- if sch.AdditionalProperties == nil {
- sch.AdditionalProperties = new(spec.SchemaOrBool)
- }
- sch.AdditionalProperties.Allows = false
- if sch.AdditionalProperties.Schema == nil {
- sch.AdditionalProperties.Schema = new(spec.Schema)
- }
- if err := scp.parseNamedType(gofile, ftpe.Value, schemaTypable{sch.AdditionalProperties.Schema, 0}); err != nil {
- return err
- }
- sch.Typed("object", "")
- }
- }
-
- case *ast.InterfaceType:
- prop.Schema().Typed("object", "")
- default:
- pos := "unknown file:unknown position"
- if scp != nil {
- if scp.program != nil {
- if scp.program.Fset != nil {
- pos = scp.program.Fset.Position(expr.Pos()).String()
- }
- }
- }
- return fmt.Errorf("expr (%s) is unsupported for a schema", pos)
- }
- return nil
-}
-
-func (scp *schemaParser) parseEmbeddedType(gofile *ast.File, schema *spec.Schema, expr ast.Expr, seenPreviously map[string]string) error {
- switch tpe := expr.(type) {
- case *ast.Ident:
- // do lookup of type
- // take primitives into account, they should result in an error for swagger
- pkg, err := scp.packageForFile(gofile, tpe)
- if err != nil {
- return err
- }
- file, _, ts, err := findSourceFile(pkg, tpe.Name)
- if err != nil {
- return err
- }
-
- switch st := ts.Type.(type) {
- case *ast.StructType:
- return scp.parseStructType(file, schema, st, seenPreviously)
- case *ast.InterfaceType:
- return scp.parseInterfaceType(file, schema, st, seenPreviously)
- default:
- prop := &schemaTypable{schema, 0}
- return scp.parseNamedType(gofile, st, prop)
- }
-
- case *ast.SelectorExpr:
- // look up package, file and then type
- pkg, err := scp.packageForSelector(gofile, tpe.X)
- if err != nil {
- return fmt.Errorf("embedded struct: %v", err)
- }
- file, _, ts, err := findSourceFile(pkg, tpe.Sel.Name)
- if err != nil {
- return fmt.Errorf("embedded struct: %v", err)
- }
- if st, ok := ts.Type.(*ast.StructType); ok {
- return scp.parseStructType(file, schema, st, seenPreviously)
- }
- if st, ok := ts.Type.(*ast.InterfaceType); ok {
- return scp.parseInterfaceType(file, schema, st, seenPreviously)
- }
- case *ast.StarExpr:
- return scp.parseEmbeddedType(gofile, schema, tpe.X, seenPreviously)
- default:
- return fmt.Errorf(
- "parseEmbeddedType: unsupported type %v at position %#v",
- expr,
- scp.program.Fset.Position(tpe.Pos()),
- )
- }
- return fmt.Errorf("unable to resolve embedded struct for: %v", expr)
-}
-
-func (scp *schemaParser) parseAllOfMember(gofile *ast.File, schema *spec.Schema, expr ast.Expr, seenPreviously map[string]string) error {
- // TODO: check if struct is annotated with swagger:model or known in the definitions otherwise
- var pkg *loader.PackageInfo
- var file *ast.File
- var gd *ast.GenDecl
- var ts *ast.TypeSpec
- var err error
-
- switch tpe := expr.(type) {
- case *ast.Ident:
- // do lookup of type
- // take primitives into account, they should result in an error for swagger
- pkg, err = scp.packageForFile(gofile, tpe)
- if err != nil {
- return err
- }
- file, gd, ts, err = findSourceFile(pkg, tpe.Name)
- if err != nil {
- return err
- }
-
- case *ast.SelectorExpr:
- // look up package, file and then type
- pkg, err = scp.packageForSelector(gofile, tpe.X)
- if err != nil {
- return fmt.Errorf("embedded struct: %v", err)
- }
- file, gd, ts, err = findSourceFile(pkg, tpe.Sel.Name)
- if err != nil {
- return fmt.Errorf("embedded struct: %v", err)
- }
- default:
- return fmt.Errorf("unable to resolve allOf member for: %v", expr)
- }
-
- sd := newSchemaDecl(file, gd, ts)
- if sd.hasAnnotation() && pkg.String() != "time" && ts.Name.Name != "Time" {
- ref, err := spec.NewRef("#/definitions/" + sd.Name)
- if err != nil {
- return err
- }
- schema.Ref = ref
- scp.postDecls = append(scp.postDecls, *sd)
- } else {
- switch st := ts.Type.(type) {
- case *ast.StructType:
- return scp.parseStructType(file, schema, st, seenPreviously)
- case *ast.InterfaceType:
- return scp.parseInterfaceType(file, schema, st, seenPreviously)
- }
- }
-
- return nil
-}
-func (scp *schemaParser) parseInterfaceType(gofile *ast.File, bschema *spec.Schema, tpe *ast.InterfaceType, seenPreviously map[string]string) error {
- if tpe.Methods == nil {
- return nil
- }
-
- // first check if this has embedded interfaces, if so make sure to refer to those by ref
- // when they are decorated with an allOf annotation
- // go over the method list again and this time collect the nullary methods and parse the comments
- // as if they are properties on a struct
- var schema *spec.Schema
- seenProperties := seenPreviously
- hasAllOf := false
-
- for _, fld := range tpe.Methods.List {
- if len(fld.Names) == 0 {
- // if this created an allOf property then we have to rejig the schema var
- // because all the fields collected that aren't from embedded structs should go in
- // their own proper schema
- // first process embedded structs in order of embedding
- if allOfMember(fld.Doc) {
- hasAllOf = true
- if schema == nil {
- schema = new(spec.Schema)
- }
- var newSch spec.Schema
- // when the embedded struct is annotated with swagger:allOf it will be used as allOf property
- // otherwise the fields will just be included as normal properties
- if err := scp.parseAllOfMember(gofile, &newSch, fld.Type, seenProperties); err != nil {
- return err
- }
-
- if fld.Doc != nil {
- for _, cmt := range fld.Doc.List {
- for _, ln := range strings.Split(cmt.Text, "\n") {
- matches := rxAllOf.FindStringSubmatch(ln)
- ml := len(matches)
- if ml > 1 {
- mv := matches[ml-1]
- if mv != "" {
- addExtension(&bschema.VendorExtensible, "x-class", mv)
- }
- }
- }
- }
- }
-
- bschema.AllOf = append(bschema.AllOf, newSch)
- continue
- }
-
- var newSch spec.Schema
- // when the embedded struct is annotated with swagger:allOf it will be used as allOf property
- // otherwise the fields will just be included as normal properties
- if err := scp.parseEmbeddedType(gofile, &newSch, fld.Type, seenProperties); err != nil {
- return err
- }
- bschema.AllOf = append(bschema.AllOf, newSch)
- hasAllOf = true
- }
- }
-
- if schema == nil {
- schema = bschema
- }
- // then add and possibly override values
- if schema.Properties == nil {
- schema.Properties = make(map[string]spec.Schema)
- }
- schema.Typed("object", "")
- for _, fld := range tpe.Methods.List {
- if mtpe, ok := fld.Type.(*ast.FuncType); ok && mtpe.Params.NumFields() == 0 && mtpe.Results.NumFields() == 1 {
- gnm := fld.Names[0].Name
- nm := gnm
- if fld.Doc != nil {
- for _, cmt := range fld.Doc.List {
- for _, ln := range strings.Split(cmt.Text, "\n") {
- matches := rxName.FindStringSubmatch(ln)
- ml := len(matches)
- if ml > 1 {
- nm = matches[ml-1]
- }
- }
- }
- }
-
- ps := schema.Properties[nm]
- if err := parseProperty(scp, gofile, mtpe.Results.List[0].Type, schemaTypable{&ps, 0}); err != nil {
- return err
- }
-
- if err := scp.createParser(nm, schema, &ps, fld).Parse(fld.Doc); err != nil {
- return err
- }
-
- if ps.Ref.String() == "" && nm != gnm {
- addExtension(&ps.VendorExtensible, "x-go-name", gnm)
- }
- seenProperties[nm] = gnm
- schema.Properties[nm] = ps
- }
-
- }
- if schema != nil && hasAllOf && len(schema.Properties) > 0 {
- bschema.AllOf = append(bschema.AllOf, *schema)
- }
- for k := range schema.Properties {
- if _, ok := seenProperties[k]; !ok {
- delete(schema.Properties, k)
- }
- }
- return nil
-}
-
-func (scp *schemaParser) parseStructType(gofile *ast.File, bschema *spec.Schema, tpe *ast.StructType, seenPreviously map[string]string) error {
- if tpe.Fields == nil {
- return nil
- }
- var schema *spec.Schema
- seenProperties := seenPreviously
- hasAllOf := false
-
- for _, fld := range tpe.Fields.List {
- if len(fld.Names) == 0 {
- // if the field is annotated with swagger:ignore, ignore it
- if ignored(fld.Doc) {
- continue
- }
-
- _, ignore, _, err := parseJSONTag(fld)
- if err != nil {
- return err
- }
- if ignore {
- continue
- }
-
- // if this created an allOf property then we have to rejig the schema var
- // because all the fields collected that aren't from embedded structs should go in
- // their own proper schema
- // first process embedded structs in order of embedding
- if allOfMember(fld.Doc) {
- hasAllOf = true
- if schema == nil {
- schema = new(spec.Schema)
- }
- var newSch spec.Schema
- // when the embedded struct is annotated with swagger:allOf it will be used as allOf property
- // otherwise the fields will just be included as normal properties
- if err := scp.parseAllOfMember(gofile, &newSch, fld.Type, seenProperties); err != nil {
- return err
- }
-
- if fld.Doc != nil {
- for _, cmt := range fld.Doc.List {
- for _, ln := range strings.Split(cmt.Text, "\n") {
- matches := rxAllOf.FindStringSubmatch(ln)
- ml := len(matches)
- if ml > 1 {
- mv := matches[ml-1]
- if mv != "" {
- addExtension(&bschema.VendorExtensible, "x-class", mv)
- }
- }
- }
- }
- }
-
- bschema.AllOf = append(bschema.AllOf, newSch)
- continue
- }
- if schema == nil {
- schema = bschema
- }
-
- // when the embedded struct is annotated with swagger:allOf it will be used as allOf property
- // otherwise the fields will just be included as normal properties
- if err := scp.parseEmbeddedType(gofile, schema, fld.Type, seenProperties); err != nil {
- return err
- }
- }
- }
- if schema == nil {
- schema = bschema
- }
-
- // then add and possibly override values
- if schema.Properties == nil {
- schema.Properties = make(map[string]spec.Schema)
- }
- schema.Typed("object", "")
- for _, fld := range tpe.Fields.List {
- if len(fld.Names) > 0 && fld.Names[0] != nil && fld.Names[0].IsExported() {
- // if the field is annotated with swagger:ignore, ignore it
- if ignored(fld.Doc) {
- continue
- }
-
- gnm := fld.Names[0].Name
- nm, ignore, isString, err := parseJSONTag(fld)
- if err != nil {
- return err
- }
- if ignore {
- for seenTagName, seenFieldName := range seenPreviously {
- if seenFieldName == gnm {
- delete(schema.Properties, seenTagName)
- break
- }
- }
- continue
- }
-
- ps := schema.Properties[nm]
- if err := parseProperty(scp, gofile, fld.Type, schemaTypable{&ps, 0}); err != nil {
- return err
- }
- if isString {
- ps.Typed("string", ps.Format)
- ps.Ref = spec.Ref{}
- }
- if strfmtName, ok := strfmtName(fld.Doc); ok {
- ps.Typed("string", strfmtName)
- ps.Ref = spec.Ref{}
- }
-
- if err := scp.createParser(nm, schema, &ps, fld).Parse(fld.Doc); err != nil {
- return err
- }
-
- if ps.Ref.String() == "" && nm != gnm {
- addExtension(&ps.VendorExtensible, "x-go-name", gnm)
- }
- // we have 2 cases:
- // 1. field with different name override tag
- // 2. field with different name removes tag
- // so we need to save both tag&name
- seenProperties[nm] = gnm
- schema.Properties[nm] = ps
- }
- }
- if schema != nil && hasAllOf && len(schema.Properties) > 0 {
- bschema.AllOf = append(bschema.AllOf, *schema)
- }
- for k := range schema.Properties {
- if _, ok := seenProperties[k]; !ok {
- delete(schema.Properties, k)
- }
- }
- return nil
-}
-
-var schemaVendorExtensibleParser = vendorExtensibleParser{
- setExtensions: func(ext spec.Extensions, dest interface{}) {
- dest.(*spec.Schema).Extensions = ext
- },
-}
-
-func (scp *schemaParser) createParser(nm string, schema, ps *spec.Schema, fld *ast.Field) *sectionedParser {
- sp := new(sectionedParser)
-
- schemeType, err := ps.Type.MarshalJSON()
- if err != nil {
- return nil
- }
-
- if ps.Ref.String() == "" {
- sp.setDescription = func(lines []string) { ps.Description = joinDropLast(lines) }
- sp.taggers = []tagParser{
- newSingleLineTagParser("maximum", &setMaximum{schemaValidations{ps}, rxf(rxMaximumFmt, "")}),
- newSingleLineTagParser("minimum", &setMinimum{schemaValidations{ps}, rxf(rxMinimumFmt, "")}),
- newSingleLineTagParser("multipleOf", &setMultipleOf{schemaValidations{ps}, rxf(rxMultipleOfFmt, "")}),
- newSingleLineTagParser("minLength", &setMinLength{schemaValidations{ps}, rxf(rxMinLengthFmt, "")}),
- newSingleLineTagParser("maxLength", &setMaxLength{schemaValidations{ps}, rxf(rxMaxLengthFmt, "")}),
- newSingleLineTagParser("pattern", &setPattern{schemaValidations{ps}, rxf(rxPatternFmt, "")}),
- newSingleLineTagParser("minItems", &setMinItems{schemaValidations{ps}, rxf(rxMinItemsFmt, "")}),
- newSingleLineTagParser("maxItems", &setMaxItems{schemaValidations{ps}, rxf(rxMaxItemsFmt, "")}),
- newSingleLineTagParser("unique", &setUnique{schemaValidations{ps}, rxf(rxUniqueFmt, "")}),
- newSingleLineTagParser("enum", &setEnum{schemaValidations{ps}, rxf(rxEnumFmt, "")}),
- newSingleLineTagParser("default", &setDefault{&spec.SimpleSchema{Type: string(schemeType)}, schemaValidations{ps}, rxf(rxDefaultFmt, "")}),
- newSingleLineTagParser("type", &setDefault{&spec.SimpleSchema{Type: string(schemeType)}, schemaValidations{ps}, rxf(rxDefaultFmt, "")}),
- newSingleLineTagParser("example", &setExample{&spec.SimpleSchema{Type: string(schemeType)}, schemaValidations{ps}, rxf(rxExampleFmt, "")}),
- newSingleLineTagParser("required", &setRequiredSchema{schema, nm}),
- newSingleLineTagParser("readOnly", &setReadOnlySchema{ps}),
- newSingleLineTagParser("discriminator", &setDiscriminator{schema, nm}),
- newMultiLineTagParser("YAMLExtensionsBlock", newYamlParser(rxExtensions, schemaVendorExtensibleParser.ParseInto(ps)), true),
- }
-
- itemsTaggers := func(items *spec.Schema, level int) []tagParser {
- schemeType, err := items.Type.MarshalJSON()
- if err != nil {
- return nil
- }
- // the expression is 1-index based not 0-index
- itemsPrefix := fmt.Sprintf(rxItemsPrefixFmt, level+1)
- return []tagParser{
- newSingleLineTagParser(fmt.Sprintf("items%dMaximum", level), &setMaximum{schemaValidations{items}, rxf(rxMaximumFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dMinimum", level), &setMinimum{schemaValidations{items}, rxf(rxMinimumFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dMultipleOf", level), &setMultipleOf{schemaValidations{items}, rxf(rxMultipleOfFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dMinLength", level), &setMinLength{schemaValidations{items}, rxf(rxMinLengthFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dMaxLength", level), &setMaxLength{schemaValidations{items}, rxf(rxMaxLengthFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dPattern", level), &setPattern{schemaValidations{items}, rxf(rxPatternFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dMinItems", level), &setMinItems{schemaValidations{items}, rxf(rxMinItemsFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dMaxItems", level), &setMaxItems{schemaValidations{items}, rxf(rxMaxItemsFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dUnique", level), &setUnique{schemaValidations{items}, rxf(rxUniqueFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dEnum", level), &setEnum{schemaValidations{items}, rxf(rxEnumFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dDefault", level), &setDefault{&spec.SimpleSchema{Type: string(schemeType)}, schemaValidations{items}, rxf(rxDefaultFmt, itemsPrefix)}),
- newSingleLineTagParser(fmt.Sprintf("items%dExample", level), &setExample{&spec.SimpleSchema{Type: string(schemeType)}, schemaValidations{items}, rxf(rxExampleFmt, itemsPrefix)}),
- }
- }
-
- var parseArrayTypes func(expr ast.Expr, items *spec.SchemaOrArray, level int) ([]tagParser, error)
- parseArrayTypes = func(expr ast.Expr, items *spec.SchemaOrArray, level int) ([]tagParser, error) {
- if items == nil || items.Schema == nil {
- return []tagParser{}, nil
- }
- switch iftpe := expr.(type) {
- case *ast.ArrayType:
- eleTaggers := itemsTaggers(items.Schema, level)
- sp.taggers = append(eleTaggers, sp.taggers...)
- otherTaggers, err := parseArrayTypes(iftpe.Elt, items.Schema.Items, level+1)
- if err != nil {
- return nil, err
- }
- return otherTaggers, nil
- case *ast.Ident:
- taggers := []tagParser{}
- if iftpe.Obj == nil {
- taggers = itemsTaggers(items.Schema, level)
- }
- otherTaggers, err := parseArrayTypes(expr, items.Schema.Items, level+1)
- if err != nil {
- return nil, err
- }
- return append(taggers, otherTaggers...), nil
- case *ast.StarExpr:
- otherTaggers, err := parseArrayTypes(iftpe.X, items, level)
- if err != nil {
- return nil, err
- }
- return otherTaggers, nil
- default:
- return nil, fmt.Errorf("unknown field type ele for %q", nm)
- }
- }
- // check if this is a primitive, if so parse the validations from the
- // doc comments of the slice declaration.
- if ftped, ok := fld.Type.(*ast.ArrayType); ok {
- taggers, err := parseArrayTypes(ftped.Elt, ps.Items, 0)
- if err != nil {
- return sp
- }
- sp.taggers = append(taggers, sp.taggers...)
- }
-
- } else {
- sp.taggers = []tagParser{
- newSingleLineTagParser("required", &setRequiredSchema{schema, nm}),
- }
- }
- return sp
-}
-
-// hasFilePathPrefix reports whether the filesystem path s begins with the
-// elements in prefix.
-//
-// taken from: https://github.com/golang/go/blob/c87520c5981ecdeaa99e7ba636a6088f900c0c75/src/cmd/go/internal/load/path.go#L60-L80
-func hasFilePathPrefix(s, prefix string) bool {
- sv := strings.ToUpper(filepath.VolumeName(s))
- pv := strings.ToUpper(filepath.VolumeName(prefix))
- s = s[len(sv):]
- prefix = prefix[len(pv):]
- switch {
- default:
- return false
- case sv != pv:
- return false
- case len(s) == len(prefix):
- return s == prefix
- case len(s) > len(prefix):
- if prefix != "" && prefix[len(prefix)-1] == filepath.Separator {
- return strings.HasPrefix(s, prefix)
- }
- return s[len(prefix)] == filepath.Separator && s[:len(prefix)] == prefix
- }
-}
-
-func goroot() string {
- cmd := exec.Command("go", "env", "GOROOT")
- out, err := cmd.Output()
- if err != nil {
- panic("Could not detect GOROOT")
- }
- return string(out)
-}
-
-func (scp *schemaParser) packageForFile(gofile *ast.File, tpe *ast.Ident) (*loader.PackageInfo, error) {
- fn := scp.program.Fset.File(gofile.Pos()).Name()
- if Debug {
- log.Println("trying for", fn, tpe.Name, tpe.String())
- }
- fa, err := filepath.Abs(fn)
- if err != nil {
- return nil, err
- }
- if Debug {
- log.Println("absolute path", fa)
- }
- var fgp string
- gopath := os.Getenv("GOPATH")
- if gopath == "" {
- gopath = filepath.Join(os.Getenv("HOME"), "go")
- }
- for _, p := range append(filepath.SplitList(gopath), goroot()) {
- pref := filepath.Join(p, "src")
- if hasFilePathPrefix(fa, pref) {
- fgp = filepath.Dir(strings.TrimPrefix(fa, pref))[1:]
- break
- }
- }
- if Debug {
- log.Println("package in gopath", fgp)
- }
- for pkg, pkgInfo := range scp.program.AllPackages {
- if Debug {
- log.Println("inferring for", tpe.Name, "with", gofile.Name.Name, "at", pkg.Path(), "against", filepath.ToSlash(fgp))
- }
- if pkg.Name() == gofile.Name.Name && filepath.ToSlash(fgp) == pkg.Path() {
- return pkgInfo, nil
- }
- }
-
- return nil, fmt.Errorf("unable to determine package for %s", fn)
-}
-
-func (scp *schemaParser) packageForSelector(gofile *ast.File, expr ast.Expr) (*loader.PackageInfo, error) {
-
- if pth, ok := expr.(*ast.Ident); ok {
- // lookup import
- var selPath string
- for _, imp := range gofile.Imports {
- pv, err := strconv.Unquote(imp.Path.Value)
- if err != nil {
- pv = imp.Path.Value
- }
- if imp.Name != nil {
- if imp.Name.Name == pth.Name {
- selPath = pv
- break
- }
- } else {
- pkg := scp.program.Package(pv)
- if pkg != nil && pth.Name == pkg.Pkg.Name() {
- selPath = pv
- break
- } else {
- parts := strings.Split(pv, "/")
- if len(parts) > 0 && parts[len(parts)-1] == pth.Name {
- selPath = pv
- break
- }
- }
- }
- }
- // find actual struct
- if selPath == "" {
- return nil, fmt.Errorf("no import found for %s", pth.Name)
- }
-
- pkg := scp.program.Package(selPath)
- if pkg != nil {
- return pkg, nil
- }
- // TODO: I must admit this made me cry, it's not even a great solution.
- pkg = scp.program.Package("github.com/go-swagger/go-swagger/vendor/" + selPath)
- if pkg != nil {
- return pkg, nil
- }
- for _, info := range scp.program.AllPackages {
- n := info.String()
- path := "/vendor/" + selPath
- if strings.HasSuffix(n, path) {
- pkg = scp.program.Package(n)
- return pkg, nil
- }
- }
- }
- return nil, fmt.Errorf("can't determine selector path from %v", expr)
-}
-
-func (scp *schemaParser) makeRef(file *ast.File, pkg *loader.PackageInfo, gd *ast.GenDecl, ts *ast.TypeSpec, prop swaggerTypable) error {
- sd := newSchemaDecl(file, gd, ts)
- sd.inferNames()
- // make an exception for time.Time because this is a well-known string format
- if sd.Name == "Time" && pkg.String() == "time" {
- return nil
- }
- ref, err := spec.NewRef("#/definitions/" + sd.Name)
- if err != nil {
- return err
- }
- prop.SetRef(ref)
- scp.postDecls = append(scp.postDecls, *sd)
- return nil
-}
-
-func (scp *schemaParser) parseIdentProperty(pkg *loader.PackageInfo, expr *ast.Ident, prop swaggerTypable) error {
- // before proceeding make an exception to time.Time because it is a well known string format
- if pkg.String() == "time" && expr.String() == "Time" {
- prop.Typed("string", "date-time")
- return nil
- }
-
- // find the file this selector points to
- file, gd, ts, err := findSourceFile(pkg, expr.Name)
-
- if err != nil {
- err := swaggerSchemaForType(expr.Name, prop)
- if err != nil {
- return fmt.Errorf("package %s, error is: %v", pkg.String(), err)
- }
- return nil
- }
-
- if at, ok := ts.Type.(*ast.ArrayType); ok {
- // the swagger spec defines strfmt base64 as []byte.
- // in that case we don't actually want to turn it into an array
- // but we want to turn it into a string
- if _, ok := at.Elt.(*ast.Ident); ok {
- if strfmtName, ok := strfmtName(gd.Doc); ok {
- prop.Typed("string", strfmtName)
- return nil
- }
- }
- // this is a selector, so most likely not base64
- if strfmtName, ok := strfmtName(gd.Doc); ok {
- prop.Items().Typed("string", strfmtName)
- return nil
- }
- }
-
- // look at doc comments for swagger:strfmt [name]
- // when found this is the format name, create a schema with that name
- if strfmtName, ok := strfmtName(gd.Doc); ok {
- prop.Typed("string", strfmtName)
- return nil
- }
-
- if enumName, ok := enumName(gd.Doc); ok {
- var enumValues = getEnumValues(file, enumName)
- if len(enumValues) > 0 {
- prop.WithEnum(enumValues...)
- var typeName = reflect.TypeOf(enumValues[0]).String()
- err := swaggerSchemaForType(typeName, prop)
- if err != nil {
- return fmt.Errorf("file %s, error is: %v", file.Name, err)
- }
- }
- }
-
- if defaultName, ok := defaultName(gd.Doc); ok {
- log.Println(defaultName)
- return nil
- }
-
- if typeName, ok := typeName(gd.Doc); ok {
- _ = swaggerSchemaForType(typeName, prop)
- return nil
- }
-
- if isAliasParam(prop) || aliasParam(gd.Doc) {
- itype, ok := ts.Type.(*ast.Ident)
- if ok {
- err := swaggerSchemaForType(itype.Name, prop)
- if err == nil {
- return nil
- }
- }
- }
- switch tpe := ts.Type.(type) {
- case *ast.ArrayType:
- return scp.makeRef(file, pkg, gd, ts, prop)
- case *ast.StructType:
- return scp.makeRef(file, pkg, gd, ts, prop)
-
- case *ast.Ident:
- return scp.makeRef(file, pkg, gd, ts, prop)
-
- case *ast.StarExpr:
- return parseProperty(scp, file, tpe.X, prop)
-
- case *ast.SelectorExpr:
- // return scp.refForSelector(file, gd, tpe, ts, prop)
- return scp.makeRef(file, pkg, gd, ts, prop)
-
- case *ast.InterfaceType:
- return scp.makeRef(file, pkg, gd, ts, prop)
-
- case *ast.MapType:
- return scp.makeRef(file, pkg, gd, ts, prop)
-
- default:
- err := swaggerSchemaForType(expr.Name, prop)
- if err != nil {
- return fmt.Errorf("package %s, error is: %v", pkg.String(), err)
- }
- return nil
- }
-
-}
-
-func (scp *schemaParser) typeForSelector(gofile *ast.File, expr *ast.SelectorExpr, prop swaggerTypable) error {
- pkg, err := scp.packageForSelector(gofile, expr.X)
- if err != nil {
- return err
- }
-
- return scp.parseIdentProperty(pkg, expr.Sel, prop)
-}
-
-func findSourceFile(pkg *loader.PackageInfo, typeName string) (*ast.File, *ast.GenDecl, *ast.TypeSpec, error) {
- for _, file := range pkg.Files {
- for _, decl := range file.Decls {
- if gd, ok := decl.(*ast.GenDecl); ok {
- for _, gs := range gd.Specs {
- if ts, ok := gs.(*ast.TypeSpec); ok {
- strfmtNme, isStrfmt := strfmtName(gd.Doc)
- if (isStrfmt && strfmtNme == typeName) || ts.Name != nil && ts.Name.Name == typeName {
- return file, gd, ts, nil
- }
- }
- }
- }
- }
- }
- return nil, nil, nil, fmt.Errorf("unable to find %s in %s", typeName, pkg.String())
-}
-
-func allOfMember(comments *ast.CommentGroup) bool {
- if comments != nil {
- for _, cmt := range comments.List {
- for _, ln := range strings.Split(cmt.Text, "\n") {
- if rxAllOf.MatchString(ln) {
- return true
- }
- }
- }
- }
- return false
-}
-
-func fileParam(comments *ast.CommentGroup) bool {
- if comments != nil {
- for _, cmt := range comments.List {
- for _, ln := range strings.Split(cmt.Text, "\n") {
- if rxFileUpload.MatchString(ln) {
- return true
- }
- }
- }
- }
- return false
-}
-
-func strfmtName(comments *ast.CommentGroup) (string, bool) {
- if comments != nil {
- for _, cmt := range comments.List {
- for _, ln := range strings.Split(cmt.Text, "\n") {
- matches := rxStrFmt.FindStringSubmatch(ln)
- if len(matches) > 1 && len(strings.TrimSpace(matches[1])) > 0 {
- return strings.TrimSpace(matches[1]), true
- }
- }
- }
- }
- return "", false
-}
-
-func ignored(comments *ast.CommentGroup) bool {
- if comments != nil {
- for _, cmt := range comments.List {
- for _, ln := range strings.Split(cmt.Text, "\n") {
- if rxIgnoreOverride.MatchString(ln) {
- return true
- }
- }
- }
- }
- return false
-}
-
-func enumName(comments *ast.CommentGroup) (string, bool) {
- if comments != nil {
- for _, cmt := range comments.List {
- for _, ln := range strings.Split(cmt.Text, "\n") {
- matches := rxEnum.FindStringSubmatch(ln)
- if len(matches) > 1 && len(strings.TrimSpace(matches[1])) > 0 {
- return strings.TrimSpace(matches[1]), true
- }
- }
- }
- }
- return "", false
-}
-
-func aliasParam(comments *ast.CommentGroup) bool {
- if comments != nil {
- for _, cmt := range comments.List {
- for _, ln := range strings.Split(cmt.Text, "\n") {
- if rxAlias.MatchString(ln) {
- return true
- }
- }
- }
- }
- return false
-}
-
-func defaultName(comments *ast.CommentGroup) (string, bool) {
- if comments != nil {
- for _, cmt := range comments.List {
- for _, ln := range strings.Split(cmt.Text, "\n") {
- matches := rxDefault.FindStringSubmatch(ln)
- if len(matches) > 1 && len(strings.TrimSpace(matches[1])) > 0 {
- return strings.TrimSpace(matches[1]), true
- }
- }
- }
- }
- return "", false
-}
-
-func typeName(comments *ast.CommentGroup) (string, bool) {
-
- var typ string
- if comments != nil {
- for _, cmt := range comments.List {
- for _, ln := range strings.Split(cmt.Text, "\n") {
- matches := rxType.FindStringSubmatch(ln)
- if len(matches) > 1 && len(strings.TrimSpace(matches[1])) > 0 {
- typ = strings.TrimSpace(matches[1])
- return typ, true
- }
- }
- }
- }
- return "", false
-}
-
-func parseProperty(scp *schemaParser, gofile *ast.File, fld ast.Expr, prop swaggerTypable) error {
- switch ftpe := fld.(type) {
- case *ast.Ident: // simple value
- pkg, err := scp.packageForFile(gofile, ftpe)
- if err != nil {
- return err
- }
- return scp.parseIdentProperty(pkg, ftpe, prop)
-
- case *ast.StarExpr: // pointer to something, optional by default
- if err := parseProperty(scp, gofile, ftpe.X, prop); err != nil {
- return err
- }
-
- case *ast.ArrayType: // slice type
- if err := parseProperty(scp, gofile, ftpe.Elt, prop.Items()); err != nil {
- return err
- }
-
- case *ast.StructType:
- schema := prop.Schema()
- if schema == nil {
- return fmt.Errorf("items doesn't support embedded structs")
- }
- return scp.parseStructType(gofile, prop.Schema(), ftpe, make(map[string]string))
-
- case *ast.SelectorExpr:
- err := scp.typeForSelector(gofile, ftpe, prop)
- return err
-
- case *ast.MapType:
- // check if key is a string type, if not print a message
- // and skip the map property. Only maps with string keys can go into additional properties
- sch := prop.Schema()
- if sch == nil {
- return fmt.Errorf("items doesn't support maps")
- }
- if keyIdent, ok := ftpe.Key.(*ast.Ident); sch != nil && ok {
- if keyIdent.Name == "string" {
- if sch.AdditionalProperties == nil {
- sch.AdditionalProperties = new(spec.SchemaOrBool)
- }
- sch.AdditionalProperties.Allows = false
- if sch.AdditionalProperties.Schema == nil {
- sch.AdditionalProperties.Schema = new(spec.Schema)
- }
- if err := parseProperty(scp, gofile, ftpe.Value, schemaTypable{sch.AdditionalProperties.Schema, 0}); err != nil {
- return err
- }
- sch.Typed("object", "")
- }
- }
-
- case *ast.InterfaceType:
- prop.Schema().Typed("object", "")
- default:
- pos := "unknown file:unknown position"
- if scp != nil {
- if scp.program != nil {
- if scp.program.Fset != nil {
- pos = scp.program.Fset.Position(fld.Pos()).String()
- }
- }
- }
- return fmt.Errorf("Expr (%s) is unsupported for a schema", pos)
- }
- return nil
-}
-
-func parseJSONTag(field *ast.Field) (name string, ignore bool, isString bool, err error) {
- if len(field.Names) > 0 {
- name = field.Names[0].Name
- }
- if field.Tag != nil && len(strings.TrimSpace(field.Tag.Value)) > 0 {
- tv, err := strconv.Unquote(field.Tag.Value)
- if err != nil {
- return name, false, false, err
- }
-
- if strings.TrimSpace(tv) != "" {
- st := reflect.StructTag(tv)
- jsonParts := strings.Split(st.Get("json"), ",")
- jsonName := jsonParts[0]
-
- if len(jsonParts) > 1 && jsonParts[1] == "string" {
- isString = isFieldStringable(field.Type)
- }
-
- if jsonName == "-" {
- return name, true, isString, nil
- } else if jsonName != "" {
- return jsonName, false, isString, nil
- }
- }
- }
- return name, false, false, nil
-}
-
-// isFieldStringable check if the field type is a scalar. If the field type is
-// *ast.StarExpr and is pointer type, check if it refers to a scalar.
-// Otherwise, the ",string" directive doesn't apply.
-func isFieldStringable(tpe ast.Expr) bool {
- if ident, ok := tpe.(*ast.Ident); ok {
- switch ident.Name {
- case "int", "int8", "int16", "int32", "int64",
- "uint", "uint8", "uint16", "uint32", "uint64",
- "float64", "string", "bool":
- return true
- }
- } else if starExpr, ok := tpe.(*ast.StarExpr); ok {
- return isFieldStringable(starExpr.X)
- } else {
- return false
- }
- return false
-}
diff --git a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/validators.go b/test/tools/vendor/github.com/go-swagger/go-swagger/scan/validators.go
deleted file mode 100644
index 45caf878392..00000000000
--- a/test/tools/vendor/github.com/go-swagger/go-swagger/scan/validators.go
+++ /dev/null
@@ -1,829 +0,0 @@
-//go:build !go1.11
-// +build !go1.11
-
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package scan
-
-import (
- "encoding/json"
- "fmt"
- "regexp"
- "strconv"
- "strings"
-
- "github.com/go-openapi/spec"
-)
-
-type validationBuilder interface {
- SetMaximum(float64, bool)
- SetMinimum(float64, bool)
- SetMultipleOf(float64)
-
- SetMinItems(int64)
- SetMaxItems(int64)
-
- SetMinLength(int64)
- SetMaxLength(int64)
- SetPattern(string)
-
- SetUnique(bool)
- SetEnum(string)
- SetDefault(interface{})
- SetExample(interface{})
-}
-
-type valueParser interface {
- Parse([]string) error
- Matches(string) bool
-}
-
-type setMaximum struct {
- builder validationBuilder
- rx *regexp.Regexp
-}
-
-func (sm *setMaximum) Parse(lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
- matches := sm.rx.FindStringSubmatch(lines[0])
- if len(matches) > 2 && len(matches[2]) > 0 {
- max, err := strconv.ParseFloat(matches[2], 64)
- if err != nil {
- return err
- }
- sm.builder.SetMaximum(max, matches[1] == "<")
- }
- return nil
-}
-
-func (sm *setMaximum) Matches(line string) bool {
- return sm.rx.MatchString(line)
-}
-
-type setMinimum struct {
- builder validationBuilder
- rx *regexp.Regexp
-}
-
-func (sm *setMinimum) Matches(line string) bool {
- return sm.rx.MatchString(line)
-}
-
-func (sm *setMinimum) Parse(lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
- matches := sm.rx.FindStringSubmatch(lines[0])
- if len(matches) > 2 && len(matches[2]) > 0 {
- min, err := strconv.ParseFloat(matches[2], 64)
- if err != nil {
- return err
- }
- sm.builder.SetMinimum(min, matches[1] == ">")
- }
- return nil
-}
-
-type setMultipleOf struct {
- builder validationBuilder
- rx *regexp.Regexp
-}
-
-func (sm *setMultipleOf) Matches(line string) bool {
- return sm.rx.MatchString(line)
-}
-
-func (sm *setMultipleOf) Parse(lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
- matches := sm.rx.FindStringSubmatch(lines[0])
- if len(matches) > 2 && len(matches[1]) > 0 {
- multipleOf, err := strconv.ParseFloat(matches[1], 64)
- if err != nil {
- return err
- }
- sm.builder.SetMultipleOf(multipleOf)
- }
- return nil
-}
-
-type setMaxItems struct {
- builder validationBuilder
- rx *regexp.Regexp
-}
-
-func (sm *setMaxItems) Matches(line string) bool {
- return sm.rx.MatchString(line)
-}
-
-func (sm *setMaxItems) Parse(lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
- matches := sm.rx.FindStringSubmatch(lines[0])
- if len(matches) > 1 && len(matches[1]) > 0 {
- maxItems, err := strconv.ParseInt(matches[1], 10, 64)
- if err != nil {
- return err
- }
- sm.builder.SetMaxItems(maxItems)
- }
- return nil
-}
-
-type setMinItems struct {
- builder validationBuilder
- rx *regexp.Regexp
-}
-
-func (sm *setMinItems) Matches(line string) bool {
- return sm.rx.MatchString(line)
-}
-
-func (sm *setMinItems) Parse(lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
- matches := sm.rx.FindStringSubmatch(lines[0])
- if len(matches) > 1 && len(matches[1]) > 0 {
- minItems, err := strconv.ParseInt(matches[1], 10, 64)
- if err != nil {
- return err
- }
- sm.builder.SetMinItems(minItems)
- }
- return nil
-}
-
-type setMaxLength struct {
- builder validationBuilder
- rx *regexp.Regexp
-}
-
-func (sm *setMaxLength) Parse(lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
- matches := sm.rx.FindStringSubmatch(lines[0])
- if len(matches) > 1 && len(matches[1]) > 0 {
- maxLength, err := strconv.ParseInt(matches[1], 10, 64)
- if err != nil {
- return err
- }
- sm.builder.SetMaxLength(maxLength)
- }
- return nil
-}
-
-func (sm *setMaxLength) Matches(line string) bool {
- return sm.rx.MatchString(line)
-}
-
-type setMinLength struct {
- builder validationBuilder
- rx *regexp.Regexp
-}
-
-func (sm *setMinLength) Parse(lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
- matches := sm.rx.FindStringSubmatch(lines[0])
- if len(matches) > 1 && len(matches[1]) > 0 {
- minLength, err := strconv.ParseInt(matches[1], 10, 64)
- if err != nil {
- return err
- }
- sm.builder.SetMinLength(minLength)
- }
- return nil
-}
-
-func (sm *setMinLength) Matches(line string) bool {
- return sm.rx.MatchString(line)
-}
-
-type setPattern struct {
- builder validationBuilder
- rx *regexp.Regexp
-}
-
-func (sm *setPattern) Parse(lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
- matches := sm.rx.FindStringSubmatch(lines[0])
- if len(matches) > 1 && len(matches[1]) > 0 {
- sm.builder.SetPattern(matches[1])
- }
- return nil
-}
-
-func (sm *setPattern) Matches(line string) bool {
- return sm.rx.MatchString(line)
-}
-
-type setCollectionFormat struct {
- builder operationValidationBuilder
- rx *regexp.Regexp
-}
-
-func (sm *setCollectionFormat) Parse(lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
- matches := sm.rx.FindStringSubmatch(lines[0])
- if len(matches) > 1 && len(matches[1]) > 0 {
- sm.builder.SetCollectionFormat(matches[1])
- }
- return nil
-}
-
-func (sm *setCollectionFormat) Matches(line string) bool {
- return sm.rx.MatchString(line)
-}
-
-type setUnique struct {
- builder validationBuilder
- rx *regexp.Regexp
-}
-
-func (su *setUnique) Matches(line string) bool {
- return su.rx.MatchString(line)
-}
-
-func (su *setUnique) Parse(lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
- matches := su.rx.FindStringSubmatch(lines[0])
- if len(matches) > 1 && len(matches[1]) > 0 {
- req, err := strconv.ParseBool(matches[1])
- if err != nil {
- return err
- }
- su.builder.SetUnique(req)
- }
- return nil
-}
-
-type setEnum struct {
- builder validationBuilder
- rx *regexp.Regexp
-}
-
-func (se *setEnum) Matches(line string) bool {
- return se.rx.MatchString(line)
-}
-
-func (se *setEnum) Parse(lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
- matches := se.rx.FindStringSubmatch(lines[0])
- if len(matches) > 1 && len(matches[1]) > 0 {
- se.builder.SetEnum(matches[1])
- }
- return nil
-}
-
-func parseValueFromSchema(s string, schema *spec.SimpleSchema) (interface{}, error) {
- if schema != nil {
- switch strings.Trim(schema.TypeName(), "\"") {
- case "integer", "int", "int64", "int32", "int16":
- return strconv.Atoi(s)
- case "bool", "boolean":
- return strconv.ParseBool(s)
- case "number", "float64", "float32":
- return strconv.ParseFloat(s, 64)
- case "object":
- var obj map[string]interface{}
- if err := json.Unmarshal([]byte(s), &obj); err != nil {
- // If we can't parse it, just return the string.
- return s, nil
- }
- return obj, nil
- case "array":
- var slice []interface{}
- if err := json.Unmarshal([]byte(s), &slice); err != nil {
- // If we can't parse it, just return the string.
- return s, nil
- }
- return slice, nil
- default:
- return s, nil
- }
- } else {
- return s, nil
- }
-}
-
-type setDefault struct {
- scheme *spec.SimpleSchema
- builder validationBuilder
- rx *regexp.Regexp
-}
-
-func (sd *setDefault) Matches(line string) bool {
- return sd.rx.MatchString(line)
-}
-
-func (sd *setDefault) Parse(lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
- matches := sd.rx.FindStringSubmatch(lines[0])
- if len(matches) > 1 && len(matches[1]) > 0 {
- d, err := parseValueFromSchema(matches[1], sd.scheme)
- if err != nil {
- return err
- }
- sd.builder.SetDefault(d)
- }
- return nil
-}
-
-type setExample struct {
- scheme *spec.SimpleSchema
- builder validationBuilder
- rx *regexp.Regexp
-}
-
-func (se *setExample) Matches(line string) bool {
- return se.rx.MatchString(line)
-}
-
-func (se *setExample) Parse(lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
- matches := se.rx.FindStringSubmatch(lines[0])
- if len(matches) > 1 && len(matches[1]) > 0 {
- d, err := parseValueFromSchema(matches[1], se.scheme)
- if err != nil {
- return err
- }
- se.builder.SetExample(d)
- }
- return nil
-}
-
-type matchOnlyParam struct {
- tgt *spec.Parameter
- rx *regexp.Regexp
-}
-
-func (mo *matchOnlyParam) Matches(line string) bool {
- return mo.rx.MatchString(line)
-}
-
-func (mo *matchOnlyParam) Parse(lines []string) error {
- return nil
-}
-
-type setRequiredParam struct {
- tgt *spec.Parameter
-}
-
-func (su *setRequiredParam) Matches(line string) bool {
- return rxRequired.MatchString(line)
-}
-
-func (su *setRequiredParam) Parse(lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
- matches := rxRequired.FindStringSubmatch(lines[0])
- if len(matches) > 1 && len(matches[1]) > 0 {
- req, err := strconv.ParseBool(matches[1])
- if err != nil {
- return err
- }
- su.tgt.Required = req
- }
- return nil
-}
-
-type setReadOnlySchema struct {
- tgt *spec.Schema
-}
-
-func (su *setReadOnlySchema) Matches(line string) bool {
- return rxReadOnly.MatchString(line)
-}
-
-func (su *setReadOnlySchema) Parse(lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
- matches := rxReadOnly.FindStringSubmatch(lines[0])
- if len(matches) > 1 && len(matches[1]) > 0 {
- req, err := strconv.ParseBool(matches[1])
- if err != nil {
- return err
- }
- su.tgt.ReadOnly = req
- }
- return nil
-}
-
-type setDiscriminator struct {
- schema *spec.Schema
- field string
-}
-
-func (su *setDiscriminator) Matches(line string) bool {
- return rxDiscriminator.MatchString(line)
-}
-
-func (su *setDiscriminator) Parse(lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
- matches := rxDiscriminator.FindStringSubmatch(lines[0])
- if len(matches) > 1 && len(matches[1]) > 0 {
- req, err := strconv.ParseBool(matches[1])
- if err != nil {
- return err
- }
- if req {
- su.schema.Discriminator = su.field
- } else {
- if su.schema.Discriminator == su.field {
- su.schema.Discriminator = ""
- }
- }
- }
- return nil
-}
-
-type setRequiredSchema struct {
- schema *spec.Schema
- field string
-}
-
-func (su *setRequiredSchema) Matches(line string) bool {
- return rxRequired.MatchString(line)
-}
-
-func (su *setRequiredSchema) Parse(lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
- matches := rxRequired.FindStringSubmatch(lines[0])
- if len(matches) > 1 && len(matches[1]) > 0 {
- req, err := strconv.ParseBool(matches[1])
- if err != nil {
- return err
- }
- midx := -1
- for i, nm := range su.schema.Required {
- if nm == su.field {
- midx = i
- break
- }
- }
- if req {
- if midx < 0 {
- su.schema.Required = append(su.schema.Required, su.field)
- }
- } else if midx >= 0 {
- su.schema.Required = append(su.schema.Required[:midx], su.schema.Required[midx+1:]...)
- }
- }
- return nil
-}
-
-func newMultilineDropEmptyParser(rx *regexp.Regexp, set func([]string)) *multiLineDropEmptyParser {
- return &multiLineDropEmptyParser{
- rx: rx,
- set: set,
- }
-}
-
-type multiLineDropEmptyParser struct {
- set func([]string)
- rx *regexp.Regexp
-}
-
-func (m *multiLineDropEmptyParser) Matches(line string) bool {
- return m.rx.MatchString(line)
-}
-
-func (m *multiLineDropEmptyParser) Parse(lines []string) error {
- m.set(removeEmptyLines(lines))
- return nil
-}
-
-func newSetSchemes(set func([]string)) *setSchemes {
- return &setSchemes{
- set: set,
- rx: rxSchemes,
- }
-}
-
-type setSchemes struct {
- set func([]string)
- rx *regexp.Regexp
-}
-
-func (ss *setSchemes) Matches(line string) bool {
- return ss.rx.MatchString(line)
-}
-
-func (ss *setSchemes) Parse(lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
- matches := ss.rx.FindStringSubmatch(lines[0])
- if len(matches) > 1 && len(matches[1]) > 0 {
- sch := strings.Split(matches[1], ", ")
-
- var schemes []string
- for _, s := range sch {
- ts := strings.TrimSpace(s)
- if ts != "" {
- schemes = append(schemes, ts)
- }
- }
- ss.set(schemes)
- }
- return nil
-}
-
-func newSetSecurity(rx *regexp.Regexp, setter func([]map[string][]string)) *setSecurity {
- return &setSecurity{
- set: setter,
- rx: rx,
- }
-}
-
-type setSecurity struct {
- set func([]map[string][]string)
- rx *regexp.Regexp
-}
-
-func (ss *setSecurity) Matches(line string) bool {
- return ss.rx.MatchString(line)
-}
-
-func (ss *setSecurity) Parse(lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
-
- var result []map[string][]string
- for _, line := range lines {
- kv := strings.SplitN(line, ":", 2)
- scopes := []string{}
- var key string
-
- if len(kv) > 1 {
- scs := strings.Split(kv[1], ",")
- for _, scope := range scs {
- tr := strings.TrimSpace(scope)
- if tr != "" {
- tr = strings.SplitAfter(tr, " ")[0]
- scopes = append(scopes, strings.TrimSpace(tr))
- }
- }
-
- key = strings.TrimSpace(kv[0])
-
- result = append(result, map[string][]string{key: scopes})
- }
- }
- ss.set(result)
- return nil
-}
-
-func newSetResponses(definitions map[string]spec.Schema, responses map[string]spec.Response, setter func(*spec.Response, map[int]spec.Response)) *setOpResponses {
- return &setOpResponses{
- set: setter,
- rx: rxResponses,
- definitions: definitions,
- responses: responses,
- }
-}
-
-type setOpResponses struct {
- set func(*spec.Response, map[int]spec.Response)
- rx *regexp.Regexp
- definitions map[string]spec.Schema
- responses map[string]spec.Response
-}
-
-func (ss *setOpResponses) Matches(line string) bool {
- return ss.rx.MatchString(line)
-}
-
-// ResponseTag used when specifying a response to point to a defined swagger:response
-const ResponseTag = "response"
-
-// BodyTag used when specifying a response to point to a model/schema
-const BodyTag = "body"
-
-// DescriptionTag used when specifying a response that gives a description of the response
-const DescriptionTag = "description"
-
-func parseTags(line string) (modelOrResponse string, arrays int, isDefinitionRef bool, description string, err error) {
- tags := strings.Split(line, " ")
- parsedModelOrResponse := false
-
- for i, tagAndValue := range tags {
- tagValList := strings.SplitN(tagAndValue, ":", 2)
- var tag, value string
- if len(tagValList) > 1 {
- tag = tagValList[0]
- value = tagValList[1]
- } else {
- //TODO: Print a warning, and in the long term, do not support not tagged values
- //Add a default tag if none is supplied
- if i == 0 {
- tag = ResponseTag
- } else {
- tag = DescriptionTag
- }
- value = tagValList[0]
- }
-
- foundModelOrResponse := false
- if !parsedModelOrResponse {
- if tag == BodyTag {
- foundModelOrResponse = true
- isDefinitionRef = true
- }
- if tag == ResponseTag {
- foundModelOrResponse = true
- isDefinitionRef = false
- }
- }
- if foundModelOrResponse {
- //Read the model or response tag
- parsedModelOrResponse = true
- //Check for nested arrays
- arrays = 0
- for strings.HasPrefix(value, "[]") {
- arrays++
- value = value[2:]
- }
- //What's left over is the model name
- modelOrResponse = value
- } else {
- foundDescription := false
- if tag == DescriptionTag {
- foundDescription = true
- }
- if foundDescription {
- //Descriptions are special, they make they read the rest of the line
- descriptionWords := []string{value}
- if i < len(tags)-1 {
- descriptionWords = append(descriptionWords, tags[i+1:]...)
- }
- description = strings.Join(descriptionWords, " ")
- break
- } else {
- if tag == ResponseTag || tag == BodyTag || tag == DescriptionTag {
- err = fmt.Errorf("Found valid tag %s, but not in a valid position", tag)
- } else {
- err = fmt.Errorf("Found invalid tag: %s", tag)
- }
- //return error
- return
- }
- }
- }
-
- //TODO: Maybe do, if !parsedModelOrResponse {return some error}
- return
-}
-
-func (ss *setOpResponses) Parse(lines []string) error {
- if len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {
- return nil
- }
-
- var def *spec.Response
- var scr map[int]spec.Response
-
- for _, line := range lines {
- kv := strings.SplitN(line, ":", 2)
- var key, value string
-
- if len(kv) > 1 {
- key = strings.TrimSpace(kv[0])
- if key == "" {
- // this must be some weird empty line
- continue
- }
- value = strings.TrimSpace(kv[1])
- if value == "" {
- var resp spec.Response
- if strings.EqualFold("default", key) {
- if def == nil {
- def = &resp
- }
- } else {
- if sc, err := strconv.Atoi(key); err == nil {
- if scr == nil {
- scr = make(map[int]spec.Response)
- }
- scr[sc] = resp
- }
- }
- continue
- }
- refTarget, arrays, isDefinitionRef, description, err := parseTags(value)
- if err != nil {
- return err
- }
- //A possible exception for having a definition
- if _, ok := ss.responses[refTarget]; !ok {
- if _, ok := ss.definitions[refTarget]; ok {
- isDefinitionRef = true
- }
- }
-
- var ref spec.Ref
- if isDefinitionRef {
- if description == "" {
- description = refTarget
- }
- ref, err = spec.NewRef("#/definitions/" + refTarget)
- } else {
- ref, err = spec.NewRef("#/responses/" + refTarget)
- }
- if err != nil {
- return err
- }
-
- // description should used on anyway.
- resp := spec.Response{ResponseProps: spec.ResponseProps{Description: description}}
-
- if isDefinitionRef {
- resp.Schema = new(spec.Schema)
- resp.Description = description
- if arrays == 0 {
- resp.Schema.Ref = ref
- } else {
- cs := resp.Schema
- for i := 0; i < arrays; i++ {
- cs.Typed("array", "")
- cs.Items = new(spec.SchemaOrArray)
- cs.Items.Schema = new(spec.Schema)
- cs = cs.Items.Schema
- }
- cs.Ref = ref
- }
- // ref. could be empty while use description tag
- } else if len(refTarget) > 0 {
- resp.Ref = ref
- }
-
- if strings.EqualFold("default", key) {
- if def == nil {
- def = &resp
- }
- } else {
- if sc, err := strconv.Atoi(key); err == nil {
- if scr == nil {
- scr = make(map[int]spec.Response)
- }
- scr[sc] = resp
- }
- }
- }
- }
- ss.set(def, scr)
- return nil
-}
-
-func parseEnum(val string, s *spec.SimpleSchema) []interface{} {
- list := strings.Split(val, ",")
- interfaceSlice := make([]interface{}, len(list))
- for i, d := range list {
- v, err := parseValueFromSchema(d, s)
- if err != nil {
- interfaceSlice[i] = d
- continue
- }
-
- interfaceSlice[i] = v
- }
- return interfaceSlice
-}
diff --git a/test/tools/vendor/github.com/go-viper/mapstructure/v2/.editorconfig b/test/tools/vendor/github.com/go-viper/mapstructure/v2/.editorconfig
new file mode 100644
index 00000000000..1f664d13a5f
--- /dev/null
+++ b/test/tools/vendor/github.com/go-viper/mapstructure/v2/.editorconfig
@@ -0,0 +1,18 @@
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+indent_size = 4
+indent_style = space
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+[*.go]
+indent_style = tab
+
+[{Makefile,*.mk}]
+indent_style = tab
+
+[*.nix]
+indent_size = 2
diff --git a/test/tools/vendor/github.com/go-viper/mapstructure/v2/.envrc b/test/tools/vendor/github.com/go-viper/mapstructure/v2/.envrc
new file mode 100644
index 00000000000..2e0f9f5f711
--- /dev/null
+++ b/test/tools/vendor/github.com/go-viper/mapstructure/v2/.envrc
@@ -0,0 +1,4 @@
+if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then
+ source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4="
+fi
+use flake . --impure
diff --git a/test/tools/vendor/github.com/go-viper/mapstructure/v2/.gitignore b/test/tools/vendor/github.com/go-viper/mapstructure/v2/.gitignore
new file mode 100644
index 00000000000..470e7ca2bd2
--- /dev/null
+++ b/test/tools/vendor/github.com/go-viper/mapstructure/v2/.gitignore
@@ -0,0 +1,6 @@
+/.devenv/
+/.direnv/
+/.pre-commit-config.yaml
+/bin/
+/build/
+/var/
diff --git a/test/tools/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml b/test/tools/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml
new file mode 100644
index 00000000000..763143aa77a
--- /dev/null
+++ b/test/tools/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml
@@ -0,0 +1,23 @@
+run:
+ timeout: 5m
+
+linters-settings:
+ gci:
+ sections:
+ - standard
+ - default
+ - prefix(github.com/go-viper/mapstructure)
+ golint:
+ min-confidence: 0
+ goimports:
+ local-prefixes: github.com/go-viper/maptstructure
+
+linters:
+ disable-all: true
+ enable:
+ - gci
+ - gofmt
+ - gofumpt
+ - goimports
+ - staticcheck
+ # - stylecheck
diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/test/tools/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md
similarity index 92%
rename from vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
rename to test/tools/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md
index c758234904e..afd44e5f5fc 100644
--- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
+++ b/test/tools/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md
@@ -1,3 +1,11 @@
+> [!WARNING]
+> As of v2 of this library, change log can be found in GitHub releases.
+
+## 1.5.1
+
+* Wrap errors so they're compatible with `errors.Is` and `errors.As` [GH-282]
+* Fix map of slices not decoding properly in certain cases. [GH-266]
+
## 1.5.0
* New option `IgnoreUntaggedFields` to ignore decoding to any fields
diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/test/tools/vendor/github.com/go-viper/mapstructure/v2/LICENSE
similarity index 100%
rename from vendor/github.com/mitchellh/mapstructure/LICENSE
rename to test/tools/vendor/github.com/go-viper/mapstructure/v2/LICENSE
diff --git a/test/tools/vendor/github.com/go-viper/mapstructure/v2/README.md b/test/tools/vendor/github.com/go-viper/mapstructure/v2/README.md
new file mode 100644
index 00000000000..dd5ec69ddf7
--- /dev/null
+++ b/test/tools/vendor/github.com/go-viper/mapstructure/v2/README.md
@@ -0,0 +1,80 @@
+# mapstructure
+
+[](https://github.com/go-viper/mapstructure/actions?query=workflow%3ACI)
+[](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2)
+
+
+mapstructure is a Go library for decoding generic map values to structures
+and vice versa, while providing helpful error handling.
+
+This library is most useful when decoding values from some data stream (JSON,
+Gob, etc.) where you don't _quite_ know the structure of the underlying data
+until you read a part of it. You can therefore read a `map[string]interface{}`
+and use this library to decode it into the proper underlying native Go
+structure.
+
+## Installation
+
+```shell
+go get github.com/go-viper/mapstructure/v2
+```
+
+## Migrating from `github.com/mitchellh/mapstructure`
+
+[@mitchehllh](https://github.com/mitchellh) announced his intent to archive some of his unmaintained projects (see [here](https://gist.github.com/mitchellh/90029601268e59a29e64e55bab1c5bdc) and [here](https://github.com/mitchellh/mapstructure/issues/349)). This is a repository achieved the "blessed fork" status.
+
+You can migrate to this package by changing your import paths in your Go files to `github.com/go-viper/mapstructure/v2`.
+The API is the same, so you don't need to change anything else.
+
+Here is a script that can help you with the migration:
+
+```shell
+sed -i 's/github.com\/mitchellh\/mapstructure/github.com\/go-viper\/mapstructure\/v2/g' $(find . -type f -name '*.go')
+```
+
+If you need more time to migrate your code, that is absolutely fine.
+
+Some of the latest fixes are backported to the v1 release branch of this package, so you can use the Go modules `replace` feature until you are ready to migrate:
+
+```shell
+replace github.com/mitchellh/mapstructure => github.com/go-viper/mapstructure v1.6.0
+```
+
+## Usage & Example
+
+For usage and examples see the [documentation](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2).
+
+The `Decode` function has examples associated with it there.
+
+## But Why?!
+
+Go offers fantastic standard libraries for decoding formats such as JSON.
+The standard method is to have a struct pre-created, and populate that struct
+from the bytes of the encoded format. This is great, but the problem is if
+you have configuration or an encoding that changes slightly depending on
+specific fields. For example, consider this JSON:
+
+```json
+{
+ "type": "person",
+ "name": "Mitchell"
+}
+```
+
+Perhaps we can't populate a specific structure without first reading
+the "type" field from the JSON. We could always do two passes over the
+decoding of the JSON (reading the "type" first, and the rest later).
+However, it is much simpler to just decode this into a `map[string]interface{}`
+structure, read the "type" key, then use something like this library
+to decode it into the proper structure.
+
+## Credits
+
+Mapstructure was originally created by [@mitchellh](https://github.com/mitchellh).
+This is a maintained fork of the original library.
+
+Read more about the reasons for the fork [here](https://github.com/mitchellh/mapstructure/issues/349).
+
+## License
+
+The project is licensed under the [MIT License](LICENSE).
diff --git a/test/tools/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go b/test/tools/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go
new file mode 100644
index 00000000000..1f3c69d4b8c
--- /dev/null
+++ b/test/tools/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go
@@ -0,0 +1,630 @@
+package mapstructure
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "net"
+ "net/netip"
+ "net/url"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
+// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
+func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
+ // Create variables here so we can reference them with the reflect pkg
+ var f1 DecodeHookFuncType
+ var f2 DecodeHookFuncKind
+ var f3 DecodeHookFuncValue
+
+ // Fill in the variables into this interface and the rest is done
+ // automatically using the reflect package.
+ potential := []interface{}{f1, f2, f3}
+
+ v := reflect.ValueOf(h)
+ vt := v.Type()
+ for _, raw := range potential {
+ pt := reflect.ValueOf(raw).Type()
+ if vt.ConvertibleTo(pt) {
+ return v.Convert(pt).Interface()
+ }
+ }
+
+ return nil
+}
+
+// cachedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
+// it into a closure to be used directly
+// if the type fails to convert we return a closure always erroring to keep the previous behaviour
+func cachedDecodeHook(raw DecodeHookFunc) func(from reflect.Value, to reflect.Value) (interface{}, error) {
+ switch f := typedDecodeHook(raw).(type) {
+ case DecodeHookFuncType:
+ return func(from reflect.Value, to reflect.Value) (interface{}, error) {
+ return f(from.Type(), to.Type(), from.Interface())
+ }
+ case DecodeHookFuncKind:
+ return func(from reflect.Value, to reflect.Value) (interface{}, error) {
+ return f(from.Kind(), to.Kind(), from.Interface())
+ }
+ case DecodeHookFuncValue:
+ return func(from reflect.Value, to reflect.Value) (interface{}, error) {
+ return f(from, to)
+ }
+ default:
+ return func(from reflect.Value, to reflect.Value) (interface{}, error) {
+ return nil, errors.New("invalid decode hook signature")
+ }
+ }
+}
+
+// DecodeHookExec executes the given decode hook. This should be used
+// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
+// that took reflect.Kind instead of reflect.Type.
+func DecodeHookExec(
+ raw DecodeHookFunc,
+ from reflect.Value, to reflect.Value,
+) (interface{}, error) {
+ switch f := typedDecodeHook(raw).(type) {
+ case DecodeHookFuncType:
+ return f(from.Type(), to.Type(), from.Interface())
+ case DecodeHookFuncKind:
+ return f(from.Kind(), to.Kind(), from.Interface())
+ case DecodeHookFuncValue:
+ return f(from, to)
+ default:
+ return nil, errors.New("invalid decode hook signature")
+ }
+}
+
+// ComposeDecodeHookFunc creates a single DecodeHookFunc that
+// automatically composes multiple DecodeHookFuncs.
+//
+// The composed funcs are called in order, with the result of the
+// previous transformation.
+func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
+ cached := make([]func(from reflect.Value, to reflect.Value) (interface{}, error), 0, len(fs))
+ for _, f := range fs {
+ cached = append(cached, cachedDecodeHook(f))
+ }
+ return func(f reflect.Value, t reflect.Value) (interface{}, error) {
+ var err error
+ data := f.Interface()
+
+ newFrom := f
+ for _, c := range cached {
+ data, err = c(newFrom, t)
+ if err != nil {
+ return nil, err
+ }
+ newFrom = reflect.ValueOf(data)
+ }
+
+ return data, nil
+ }
+}
+
+// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned.
+// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages.
+func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc {
+ cached := make([]func(from reflect.Value, to reflect.Value) (interface{}, error), 0, len(ff))
+ for _, f := range ff {
+ cached = append(cached, cachedDecodeHook(f))
+ }
+ return func(a, b reflect.Value) (interface{}, error) {
+ var allErrs string
+ var out interface{}
+ var err error
+
+ for _, c := range cached {
+ out, err = c(a, b)
+ if err != nil {
+ allErrs += err.Error() + "\n"
+ continue
+ }
+
+ return out, nil
+ }
+
+ return nil, errors.New(allErrs)
+ }
+}
+
+// StringToSliceHookFunc returns a DecodeHookFunc that converts
+// string to []string by splitting on the given sep.
+func StringToSliceHookFunc(sep string) DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{},
+ ) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.SliceOf(f) {
+ return data, nil
+ }
+
+ raw := data.(string)
+ if raw == "" {
+ return []string{}, nil
+ }
+
+ return strings.Split(raw, sep), nil
+ }
+}
+
+// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
+// strings to time.Duration.
+func StringToTimeDurationHookFunc() DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{},
+ ) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.TypeOf(time.Duration(5)) {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return time.ParseDuration(data.(string))
+ }
+}
+
+// StringToURLHookFunc returns a DecodeHookFunc that converts
+// strings to *url.URL.
+func StringToURLHookFunc() DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{},
+ ) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.TypeOf(&url.URL{}) {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return url.Parse(data.(string))
+ }
+}
+
+// StringToIPHookFunc returns a DecodeHookFunc that converts
+// strings to net.IP
+func StringToIPHookFunc() DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{},
+ ) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.TypeOf(net.IP{}) {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ ip := net.ParseIP(data.(string))
+ if ip == nil {
+ return net.IP{}, fmt.Errorf("failed parsing ip %v", data)
+ }
+
+ return ip, nil
+ }
+}
+
+// StringToIPNetHookFunc returns a DecodeHookFunc that converts
+// strings to net.IPNet
+func StringToIPNetHookFunc() DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{},
+ ) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.TypeOf(net.IPNet{}) {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ _, net, err := net.ParseCIDR(data.(string))
+ return net, err
+ }
+}
+
+// StringToTimeHookFunc returns a DecodeHookFunc that converts
+// strings to time.Time.
+func StringToTimeHookFunc(layout string) DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{},
+ ) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.TypeOf(time.Time{}) {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return time.Parse(layout, data.(string))
+ }
+}
+
+// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
+// the decoder.
+//
+// Note that this is significantly different from the WeaklyTypedInput option
+// of the DecoderConfig.
+func WeaklyTypedHook(
+ f reflect.Kind,
+ t reflect.Kind,
+ data interface{},
+) (interface{}, error) {
+ dataVal := reflect.ValueOf(data)
+ switch t {
+ case reflect.String:
+ switch f {
+ case reflect.Bool:
+ if dataVal.Bool() {
+ return "1", nil
+ }
+ return "0", nil
+ case reflect.Float32:
+ return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
+ case reflect.Int:
+ return strconv.FormatInt(dataVal.Int(), 10), nil
+ case reflect.Slice:
+ dataType := dataVal.Type()
+ elemKind := dataType.Elem().Kind()
+ if elemKind == reflect.Uint8 {
+ return string(dataVal.Interface().([]uint8)), nil
+ }
+ case reflect.Uint:
+ return strconv.FormatUint(dataVal.Uint(), 10), nil
+ }
+ }
+
+ return data, nil
+}
+
+func RecursiveStructToMapHookFunc() DecodeHookFunc {
+ return func(f reflect.Value, t reflect.Value) (interface{}, error) {
+ if f.Kind() != reflect.Struct {
+ return f.Interface(), nil
+ }
+
+ var i interface{} = struct{}{}
+ if t.Type() != reflect.TypeOf(&i).Elem() {
+ return f.Interface(), nil
+ }
+
+ m := make(map[string]interface{})
+ t.Set(reflect.ValueOf(m))
+
+ return f.Interface(), nil
+ }
+}
+
+// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies
+// strings to the UnmarshalText function, when the target type
+// implements the encoding.TextUnmarshaler interface
+func TextUnmarshallerHookFunc() DecodeHookFuncType {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{},
+ ) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ result := reflect.New(t).Interface()
+ unmarshaller, ok := result.(encoding.TextUnmarshaler)
+ if !ok {
+ return data, nil
+ }
+ str, ok := data.(string)
+ if !ok {
+ str = reflect.Indirect(reflect.ValueOf(&data)).Elem().String()
+ }
+ if err := unmarshaller.UnmarshalText([]byte(str)); err != nil {
+ return nil, err
+ }
+ return result, nil
+ }
+}
+
+// StringToNetIPAddrHookFunc returns a DecodeHookFunc that converts
+// strings to netip.Addr.
+func StringToNetIPAddrHookFunc() DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{},
+ ) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.TypeOf(netip.Addr{}) {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return netip.ParseAddr(data.(string))
+ }
+}
+
+// StringToNetIPAddrPortHookFunc returns a DecodeHookFunc that converts
+// strings to netip.AddrPort.
+func StringToNetIPAddrPortHookFunc() DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{},
+ ) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.TypeOf(netip.AddrPort{}) {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return netip.ParseAddrPort(data.(string))
+ }
+}
+
+// StringToBasicTypeHookFunc returns a DecodeHookFunc that converts
+// strings to basic types.
+// int8, uint8, int16, uint16, int32, uint32, int64, uint64, int, uint, float32, float64, bool, byte, rune, complex64, complex128
+func StringToBasicTypeHookFunc() DecodeHookFunc {
+ return ComposeDecodeHookFunc(
+ StringToInt8HookFunc(),
+ StringToUint8HookFunc(),
+ StringToInt16HookFunc(),
+ StringToUint16HookFunc(),
+ StringToInt32HookFunc(),
+ StringToUint32HookFunc(),
+ StringToInt64HookFunc(),
+ StringToUint64HookFunc(),
+ StringToIntHookFunc(),
+ StringToUintHookFunc(),
+ StringToFloat32HookFunc(),
+ StringToFloat64HookFunc(),
+ StringToBoolHookFunc(),
+ // byte and rune are aliases for uint8 and int32 respectively
+ // StringToByteHookFunc(),
+ // StringToRuneHookFunc(),
+ StringToComplex64HookFunc(),
+ StringToComplex128HookFunc(),
+ )
+}
+
+// StringToInt8HookFunc returns a DecodeHookFunc that converts
+// strings to int8.
+func StringToInt8HookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Int8 {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ i64, err := strconv.ParseInt(data.(string), 0, 8)
+ return int8(i64), err
+ }
+}
+
+// StringToUint8HookFunc returns a DecodeHookFunc that converts
+// strings to uint8.
+func StringToUint8HookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Uint8 {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ u64, err := strconv.ParseUint(data.(string), 0, 8)
+ return uint8(u64), err
+ }
+}
+
+// StringToInt16HookFunc returns a DecodeHookFunc that converts
+// strings to int16.
+func StringToInt16HookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Int16 {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ i64, err := strconv.ParseInt(data.(string), 0, 16)
+ return int16(i64), err
+ }
+}
+
+// StringToUint16HookFunc returns a DecodeHookFunc that converts
+// strings to uint16.
+func StringToUint16HookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Uint16 {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ u64, err := strconv.ParseUint(data.(string), 0, 16)
+ return uint16(u64), err
+ }
+}
+
+// StringToInt32HookFunc returns a DecodeHookFunc that converts
+// strings to int32.
+func StringToInt32HookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Int32 {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ i64, err := strconv.ParseInt(data.(string), 0, 32)
+ return int32(i64), err
+ }
+}
+
+// StringToUint32HookFunc returns a DecodeHookFunc that converts
+// strings to uint32.
+func StringToUint32HookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Uint32 {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ u64, err := strconv.ParseUint(data.(string), 0, 32)
+ return uint32(u64), err
+ }
+}
+
+// StringToInt64HookFunc returns a DecodeHookFunc that converts
+// strings to int64.
+func StringToInt64HookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Int64 {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return strconv.ParseInt(data.(string), 0, 64)
+ }
+}
+
+// StringToUint64HookFunc returns a DecodeHookFunc that converts
+// strings to uint64.
+func StringToUint64HookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Uint64 {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return strconv.ParseUint(data.(string), 0, 64)
+ }
+}
+
+// StringToIntHookFunc returns a DecodeHookFunc that converts
+// strings to int.
+func StringToIntHookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Int {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ i64, err := strconv.ParseInt(data.(string), 0, 0)
+ return int(i64), err
+ }
+}
+
+// StringToUintHookFunc returns a DecodeHookFunc that converts
+// strings to uint.
+func StringToUintHookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Uint {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ u64, err := strconv.ParseUint(data.(string), 0, 0)
+ return uint(u64), err
+ }
+}
+
+// StringToFloat32HookFunc returns a DecodeHookFunc that converts
+// strings to float32.
+func StringToFloat32HookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Float32 {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ f64, err := strconv.ParseFloat(data.(string), 32)
+ return float32(f64), err
+ }
+}
+
+// StringToFloat64HookFunc returns a DecodeHookFunc that converts
+// strings to float64.
+func StringToFloat64HookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Float64 {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return strconv.ParseFloat(data.(string), 64)
+ }
+}
+
+// StringToBoolHookFunc returns a DecodeHookFunc that converts
+// strings to bool.
+func StringToBoolHookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Bool {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return strconv.ParseBool(data.(string))
+ }
+}
+
+// StringToByteHookFunc returns a DecodeHookFunc that converts
+// strings to byte.
+func StringToByteHookFunc() DecodeHookFunc {
+ return StringToUint8HookFunc()
+}
+
+// StringToRuneHookFunc returns a DecodeHookFunc that converts
+// strings to rune.
+func StringToRuneHookFunc() DecodeHookFunc {
+ return StringToInt32HookFunc()
+}
+
+// StringToComplex64HookFunc returns a DecodeHookFunc that converts
+// strings to complex64.
+func StringToComplex64HookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Complex64 {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ c128, err := strconv.ParseComplex(data.(string), 64)
+ return complex64(c128), err
+ }
+}
+
+// StringToComplex128HookFunc returns a DecodeHookFunc that converts
+// strings to complex128.
+func StringToComplex128HookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Complex128 {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return strconv.ParseComplex(data.(string), 128)
+ }
+}
diff --git a/test/tools/vendor/github.com/go-viper/mapstructure/v2/flake.lock b/test/tools/vendor/github.com/go-viper/mapstructure/v2/flake.lock
new file mode 100644
index 00000000000..4bea8154e04
--- /dev/null
+++ b/test/tools/vendor/github.com/go-viper/mapstructure/v2/flake.lock
@@ -0,0 +1,472 @@
+{
+ "nodes": {
+ "cachix": {
+ "inputs": {
+ "devenv": "devenv_2",
+ "flake-compat": [
+ "devenv",
+ "flake-compat"
+ ],
+ "nixpkgs": [
+ "devenv",
+ "nixpkgs"
+ ],
+ "pre-commit-hooks": [
+ "devenv",
+ "pre-commit-hooks"
+ ]
+ },
+ "locked": {
+ "lastModified": 1712055811,
+ "narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=",
+ "owner": "cachix",
+ "repo": "cachix",
+ "rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30",
+ "type": "github"
+ },
+ "original": {
+ "owner": "cachix",
+ "repo": "cachix",
+ "type": "github"
+ }
+ },
+ "devenv": {
+ "inputs": {
+ "cachix": "cachix",
+ "flake-compat": "flake-compat_2",
+ "nix": "nix_2",
+ "nixpkgs": "nixpkgs_2",
+ "pre-commit-hooks": "pre-commit-hooks"
+ },
+ "locked": {
+ "lastModified": 1717245169,
+ "narHash": "sha256-+mW3rTBjGU8p1THJN0lX/Dd/8FbnF+3dB+mJuSaxewE=",
+ "owner": "cachix",
+ "repo": "devenv",
+ "rev": "c3f9f053c077c6f88a3de5276d9178c62baa3fc3",
+ "type": "github"
+ },
+ "original": {
+ "owner": "cachix",
+ "repo": "devenv",
+ "type": "github"
+ }
+ },
+ "devenv_2": {
+ "inputs": {
+ "flake-compat": [
+ "devenv",
+ "cachix",
+ "flake-compat"
+ ],
+ "nix": "nix",
+ "nixpkgs": "nixpkgs",
+ "poetry2nix": "poetry2nix",
+ "pre-commit-hooks": [
+ "devenv",
+ "cachix",
+ "pre-commit-hooks"
+ ]
+ },
+ "locked": {
+ "lastModified": 1708704632,
+ "narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=",
+ "owner": "cachix",
+ "repo": "devenv",
+ "rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196",
+ "type": "github"
+ },
+ "original": {
+ "owner": "cachix",
+ "ref": "python-rewrite",
+ "repo": "devenv",
+ "type": "github"
+ }
+ },
+ "flake-compat": {
+ "flake": false,
+ "locked": {
+ "lastModified": 1673956053,
+ "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
+ "owner": "edolstra",
+ "repo": "flake-compat",
+ "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
+ "type": "github"
+ },
+ "original": {
+ "owner": "edolstra",
+ "repo": "flake-compat",
+ "type": "github"
+ }
+ },
+ "flake-compat_2": {
+ "flake": false,
+ "locked": {
+ "lastModified": 1696426674,
+ "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
+ "owner": "edolstra",
+ "repo": "flake-compat",
+ "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
+ "type": "github"
+ },
+ "original": {
+ "owner": "edolstra",
+ "repo": "flake-compat",
+ "type": "github"
+ }
+ },
+ "flake-parts": {
+ "inputs": {
+ "nixpkgs-lib": "nixpkgs-lib"
+ },
+ "locked": {
+ "lastModified": 1717285511,
+ "narHash": "sha256-iKzJcpdXih14qYVcZ9QC9XuZYnPc6T8YImb6dX166kw=",
+ "owner": "hercules-ci",
+ "repo": "flake-parts",
+ "rev": "2a55567fcf15b1b1c7ed712a2c6fadaec7412ea8",
+ "type": "github"
+ },
+ "original": {
+ "owner": "hercules-ci",
+ "repo": "flake-parts",
+ "type": "github"
+ }
+ },
+ "flake-utils": {
+ "inputs": {
+ "systems": "systems"
+ },
+ "locked": {
+ "lastModified": 1689068808,
+ "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=",
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4",
+ "type": "github"
+ },
+ "original": {
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "type": "github"
+ }
+ },
+ "flake-utils_2": {
+ "inputs": {
+ "systems": "systems_2"
+ },
+ "locked": {
+ "lastModified": 1710146030,
+ "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
+ "type": "github"
+ },
+ "original": {
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "type": "github"
+ }
+ },
+ "gitignore": {
+ "inputs": {
+ "nixpkgs": [
+ "devenv",
+ "pre-commit-hooks",
+ "nixpkgs"
+ ]
+ },
+ "locked": {
+ "lastModified": 1709087332,
+ "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
+ "owner": "hercules-ci",
+ "repo": "gitignore.nix",
+ "rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
+ "type": "github"
+ },
+ "original": {
+ "owner": "hercules-ci",
+ "repo": "gitignore.nix",
+ "type": "github"
+ }
+ },
+ "nix": {
+ "inputs": {
+ "flake-compat": "flake-compat",
+ "nixpkgs": [
+ "devenv",
+ "cachix",
+ "devenv",
+ "nixpkgs"
+ ],
+ "nixpkgs-regression": "nixpkgs-regression"
+ },
+ "locked": {
+ "lastModified": 1712911606,
+ "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=",
+ "owner": "domenkozar",
+ "repo": "nix",
+ "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12",
+ "type": "github"
+ },
+ "original": {
+ "owner": "domenkozar",
+ "ref": "devenv-2.21",
+ "repo": "nix",
+ "type": "github"
+ }
+ },
+ "nix-github-actions": {
+ "inputs": {
+ "nixpkgs": [
+ "devenv",
+ "cachix",
+ "devenv",
+ "poetry2nix",
+ "nixpkgs"
+ ]
+ },
+ "locked": {
+ "lastModified": 1688870561,
+ "narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=",
+ "owner": "nix-community",
+ "repo": "nix-github-actions",
+ "rev": "165b1650b753316aa7f1787f3005a8d2da0f5301",
+ "type": "github"
+ },
+ "original": {
+ "owner": "nix-community",
+ "repo": "nix-github-actions",
+ "type": "github"
+ }
+ },
+ "nix_2": {
+ "inputs": {
+ "flake-compat": [
+ "devenv",
+ "flake-compat"
+ ],
+ "nixpkgs": [
+ "devenv",
+ "nixpkgs"
+ ],
+ "nixpkgs-regression": "nixpkgs-regression_2"
+ },
+ "locked": {
+ "lastModified": 1712911606,
+ "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=",
+ "owner": "domenkozar",
+ "repo": "nix",
+ "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12",
+ "type": "github"
+ },
+ "original": {
+ "owner": "domenkozar",
+ "ref": "devenv-2.21",
+ "repo": "nix",
+ "type": "github"
+ }
+ },
+ "nixpkgs": {
+ "locked": {
+ "lastModified": 1692808169,
+ "narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "9201b5ff357e781bf014d0330d18555695df7ba8",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "nixpkgs-unstable",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "nixpkgs-lib": {
+ "locked": {
+ "lastModified": 1717284937,
+ "narHash": "sha256-lIbdfCsf8LMFloheeE6N31+BMIeixqyQWbSr2vk79EQ=",
+ "type": "tarball",
+ "url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz"
+ },
+ "original": {
+ "type": "tarball",
+ "url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz"
+ }
+ },
+ "nixpkgs-regression": {
+ "locked": {
+ "lastModified": 1643052045,
+ "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
+ "type": "github"
+ }
+ },
+ "nixpkgs-regression_2": {
+ "locked": {
+ "lastModified": 1643052045,
+ "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
+ "type": "github"
+ }
+ },
+ "nixpkgs-stable": {
+ "locked": {
+ "lastModified": 1710695816,
+ "narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "614b4613980a522ba49f0d194531beddbb7220d3",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "nixos-23.11",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "nixpkgs_2": {
+ "locked": {
+ "lastModified": 1713361204,
+ "narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=",
+ "owner": "cachix",
+ "repo": "devenv-nixpkgs",
+ "rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6",
+ "type": "github"
+ },
+ "original": {
+ "owner": "cachix",
+ "ref": "rolling",
+ "repo": "devenv-nixpkgs",
+ "type": "github"
+ }
+ },
+ "nixpkgs_3": {
+ "locked": {
+ "lastModified": 1717112898,
+ "narHash": "sha256-7R2ZvOnvd9h8fDd65p0JnB7wXfUvreox3xFdYWd1BnY=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "6132b0f6e344ce2fe34fc051b72fb46e34f668e0",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "nixpkgs-unstable",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "poetry2nix": {
+ "inputs": {
+ "flake-utils": "flake-utils",
+ "nix-github-actions": "nix-github-actions",
+ "nixpkgs": [
+ "devenv",
+ "cachix",
+ "devenv",
+ "nixpkgs"
+ ]
+ },
+ "locked": {
+ "lastModified": 1692876271,
+ "narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=",
+ "owner": "nix-community",
+ "repo": "poetry2nix",
+ "rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3",
+ "type": "github"
+ },
+ "original": {
+ "owner": "nix-community",
+ "repo": "poetry2nix",
+ "type": "github"
+ }
+ },
+ "pre-commit-hooks": {
+ "inputs": {
+ "flake-compat": [
+ "devenv",
+ "flake-compat"
+ ],
+ "flake-utils": "flake-utils_2",
+ "gitignore": "gitignore",
+ "nixpkgs": [
+ "devenv",
+ "nixpkgs"
+ ],
+ "nixpkgs-stable": "nixpkgs-stable"
+ },
+ "locked": {
+ "lastModified": 1713775815,
+ "narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=",
+ "owner": "cachix",
+ "repo": "pre-commit-hooks.nix",
+ "rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4",
+ "type": "github"
+ },
+ "original": {
+ "owner": "cachix",
+ "repo": "pre-commit-hooks.nix",
+ "type": "github"
+ }
+ },
+ "root": {
+ "inputs": {
+ "devenv": "devenv",
+ "flake-parts": "flake-parts",
+ "nixpkgs": "nixpkgs_3"
+ }
+ },
+ "systems": {
+ "locked": {
+ "lastModified": 1681028828,
+ "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+ "owner": "nix-systems",
+ "repo": "default",
+ "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+ "type": "github"
+ },
+ "original": {
+ "owner": "nix-systems",
+ "repo": "default",
+ "type": "github"
+ }
+ },
+ "systems_2": {
+ "locked": {
+ "lastModified": 1681028828,
+ "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+ "owner": "nix-systems",
+ "repo": "default",
+ "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+ "type": "github"
+ },
+ "original": {
+ "owner": "nix-systems",
+ "repo": "default",
+ "type": "github"
+ }
+ }
+ },
+ "root": "root",
+ "version": 7
+}
diff --git a/test/tools/vendor/github.com/go-viper/mapstructure/v2/flake.nix b/test/tools/vendor/github.com/go-viper/mapstructure/v2/flake.nix
new file mode 100644
index 00000000000..4ed0f53311b
--- /dev/null
+++ b/test/tools/vendor/github.com/go-viper/mapstructure/v2/flake.nix
@@ -0,0 +1,39 @@
+{
+ inputs = {
+ nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
+ flake-parts.url = "github:hercules-ci/flake-parts";
+ devenv.url = "github:cachix/devenv";
+ };
+
+ outputs = inputs@{ flake-parts, ... }:
+ flake-parts.lib.mkFlake { inherit inputs; } {
+ imports = [
+ inputs.devenv.flakeModule
+ ];
+
+ systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ];
+
+ perSystem = { config, self', inputs', pkgs, system, ... }: rec {
+ devenv.shells = {
+ default = {
+ languages = {
+ go.enable = true;
+ };
+
+ pre-commit.hooks = {
+ nixpkgs-fmt.enable = true;
+ };
+
+ packages = with pkgs; [
+ golangci-lint
+ ];
+
+ # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767
+ containers = pkgs.lib.mkForce { };
+ };
+
+ ci = devenv.shells.default;
+ };
+ };
+ };
+}
diff --git a/test/tools/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go b/test/tools/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go
new file mode 100644
index 00000000000..d1c15e474f4
--- /dev/null
+++ b/test/tools/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go
@@ -0,0 +1,11 @@
+package errors
+
+import "errors"
+
+func New(text string) error {
+ return errors.New(text)
+}
+
+func As(err error, target interface{}) bool {
+ return errors.As(err, target)
+}
diff --git a/test/tools/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go b/test/tools/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go
new file mode 100644
index 00000000000..d74e3a0b5a4
--- /dev/null
+++ b/test/tools/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go
@@ -0,0 +1,9 @@
+//go:build go1.20
+
+package errors
+
+import "errors"
+
+func Join(errs ...error) error {
+ return errors.Join(errs...)
+}
diff --git a/test/tools/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go b/test/tools/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go
new file mode 100644
index 00000000000..700b40229cb
--- /dev/null
+++ b/test/tools/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go
@@ -0,0 +1,61 @@
+//go:build !go1.20
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package errors
+
+// Join returns an error that wraps the given errors.
+// Any nil error values are discarded.
+// Join returns nil if every value in errs is nil.
+// The error formats as the concatenation of the strings obtained
+// by calling the Error method of each element of errs, with a newline
+// between each string.
+//
+// A non-nil error returned by Join implements the Unwrap() []error method.
+func Join(errs ...error) error {
+ n := 0
+ for _, err := range errs {
+ if err != nil {
+ n++
+ }
+ }
+ if n == 0 {
+ return nil
+ }
+ e := &joinError{
+ errs: make([]error, 0, n),
+ }
+ for _, err := range errs {
+ if err != nil {
+ e.errs = append(e.errs, err)
+ }
+ }
+ return e
+}
+
+type joinError struct {
+ errs []error
+}
+
+func (e *joinError) Error() string {
+ // Since Join returns nil if every value in errs is nil,
+ // e.errs cannot be empty.
+ if len(e.errs) == 1 {
+ return e.errs[0].Error()
+ }
+
+ b := []byte(e.errs[0].Error())
+ for _, err := range e.errs[1:] {
+ b = append(b, '\n')
+ b = append(b, err.Error()...)
+ }
+ // At this point, b has at least one byte '\n'.
+ // return unsafe.String(&b[0], len(b))
+ return string(b)
+}
+
+func (e *joinError) Unwrap() []error {
+ return e.errs
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/test/tools/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go
similarity index 87%
rename from vendor/github.com/mitchellh/mapstructure/mapstructure.go
rename to test/tools/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go
index 1efb22ac361..e77e63ba383 100644
--- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go
+++ b/test/tools/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go
@@ -9,84 +9,84 @@
//
// The simplest function to start with is Decode.
//
-// Field Tags
+// # Field Tags
//
// When decoding to a struct, mapstructure will use the field name by
// default to perform the mapping. For example, if a struct has a field
// "Username" then mapstructure will look for a key in the source value
// of "username" (case insensitive).
//
-// type User struct {
-// Username string
-// }
+// type User struct {
+// Username string
+// }
//
// You can change the behavior of mapstructure by using struct tags.
// The default struct tag that mapstructure looks for is "mapstructure"
// but you can customize it using DecoderConfig.
//
-// Renaming Fields
+// # Renaming Fields
//
// To rename the key that mapstructure looks for, use the "mapstructure"
// tag and set a value directly. For example, to change the "username" example
// above to "user":
//
-// type User struct {
-// Username string `mapstructure:"user"`
-// }
+// type User struct {
+// Username string `mapstructure:"user"`
+// }
//
-// Embedded Structs and Squashing
+// # Embedded Structs and Squashing
//
// Embedded structs are treated as if they're another field with that name.
// By default, the two structs below are equivalent when decoding with
// mapstructure:
//
-// type Person struct {
-// Name string
-// }
+// type Person struct {
+// Name string
+// }
//
-// type Friend struct {
-// Person
-// }
+// type Friend struct {
+// Person
+// }
//
-// type Friend struct {
-// Person Person
-// }
+// type Friend struct {
+// Person Person
+// }
//
// This would require an input that looks like below:
//
-// map[string]interface{}{
-// "person": map[string]interface{}{"name": "alice"},
-// }
+// map[string]interface{}{
+// "person": map[string]interface{}{"name": "alice"},
+// }
//
// If your "person" value is NOT nested, then you can append ",squash" to
// your tag value and mapstructure will treat it as if the embedded struct
// were part of the struct directly. Example:
//
-// type Friend struct {
-// Person `mapstructure:",squash"`
-// }
+// type Friend struct {
+// Person `mapstructure:",squash"`
+// }
//
// Now the following input would be accepted:
//
-// map[string]interface{}{
-// "name": "alice",
-// }
+// map[string]interface{}{
+// "name": "alice",
+// }
//
// When decoding from a struct to a map, the squash tag squashes the struct
// fields into a single map. Using the example structs from above:
//
-// Friend{Person: Person{Name: "alice"}}
+// Friend{Person: Person{Name: "alice"}}
//
// Will be decoded into a map:
//
-// map[string]interface{}{
-// "name": "alice",
-// }
+// map[string]interface{}{
+// "name": "alice",
+// }
//
// DecoderConfig has a field that changes the behavior of mapstructure
// to always squash embedded structs.
//
-// Remainder Values
+// # Remainder Values
//
// If there are any unmapped keys in the source value, mapstructure by
// default will silently ignore them. You can error by setting ErrorUnused
@@ -98,20 +98,20 @@
// probably be a "map[string]interface{}" or "map[interface{}]interface{}".
// See example below:
//
-// type Friend struct {
-// Name string
-// Other map[string]interface{} `mapstructure:",remain"`
-// }
+// type Friend struct {
+// Name string
+// Other map[string]interface{} `mapstructure:",remain"`
+// }
//
// Given the input below, Other would be populated with the other
// values that weren't used (everything but "name"):
//
-// map[string]interface{}{
-// "name": "bob",
-// "address": "123 Maple St.",
-// }
+// map[string]interface{}{
+// "name": "bob",
+// "address": "123 Maple St.",
+// }
//
-// Omit Empty Values
+// # Omit Empty Values
//
// When decoding from a struct to any other value, you may use the
// ",omitempty" suffix on your tag to omit that value if it equates to
@@ -122,37 +122,37 @@
// field value is zero and a numeric type, the field is empty, and it won't
// be encoded into the destination type.
//
-// type Source struct {
-// Age int `mapstructure:",omitempty"`
-// }
+// type Source struct {
+// Age int `mapstructure:",omitempty"`
+// }
//
-// Unexported fields
+// # Unexported fields
//
// Since unexported (private) struct fields cannot be set outside the package
// where they are defined, the decoder will simply skip them.
//
// For this output type definition:
//
-// type Exported struct {
-// private string // this unexported field will be skipped
-// Public string
-// }
+// type Exported struct {
+// private string // this unexported field will be skipped
+// Public string
+// }
//
// Using this map as input:
//
-// map[string]interface{}{
-// "private": "I will be ignored",
-// "Public": "I made it through!",
-// }
+// map[string]interface{}{
+// "private": "I will be ignored",
+// "Public": "I made it through!",
+// }
//
// The following struct will be decoded:
//
-// type Exported struct {
-// private: "" // field is left with an empty string (zero value)
-// Public: "I made it through!"
-// }
+// type Exported struct {
+// private: "" // field is left with an empty string (zero value)
+// Public: "I made it through!"
+// }
//
-// Other Configuration
+// # Other Configuration
//
// mapstructure is highly configurable. See the DecoderConfig struct
// for other features and options that are supported.
@@ -160,12 +160,13 @@ package mapstructure
import (
"encoding/json"
- "errors"
"fmt"
"reflect"
"sort"
"strconv"
"strings"
+
+ "github.com/go-viper/mapstructure/v2/internal/errors"
)
// DecodeHookFunc is the callback function that can be used for
@@ -265,6 +266,10 @@ type DecoderConfig struct {
// defaults to "mapstructure"
TagName string
+ // The option of the value in the tag that indicates a field should
+ // be squashed. This defaults to "squash".
+ SquashTagOption string
+
// IgnoreUntaggedFields ignores all struct fields without explicit
// TagName, comparable to `mapstructure:"-"` as default behaviour.
IgnoreUntaggedFields bool
@@ -273,6 +278,10 @@ type DecoderConfig struct {
// field name or tag. Defaults to `strings.EqualFold`. This can be used
// to implement case-sensitive tag values, support snake casing, etc.
MatchName func(mapKey, fieldName string) bool
+
+ // DecodeNil, if set to true, will cause the DecodeHook (if present) to run
+ // even if the input is nil. This can be used to provide default values.
+ DecodeNil bool
}
// A Decoder takes a raw interface value and turns it into structured
@@ -282,7 +291,8 @@ type DecoderConfig struct {
// structure. The top-level Decode method is just a convenience that sets
// up the most basic Decoder.
type Decoder struct {
- config *DecoderConfig
+ config *DecoderConfig
+ cachedDecodeHook func(from reflect.Value, to reflect.Value) (interface{}, error)
}
// Metadata contains information about decoding a structure that
@@ -400,6 +410,10 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) {
config.TagName = "mapstructure"
}
+ if config.SquashTagOption == "" {
+ config.SquashTagOption = "squash"
+ }
+
if config.MatchName == nil {
config.MatchName = strings.EqualFold
}
@@ -407,6 +421,9 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) {
result := &Decoder{
config: config,
}
+ if config.DecodeHook != nil {
+ result.cachedDecodeHook = cachedDecodeHook(config.DecodeHook)
+ }
return result, nil
}
@@ -414,22 +431,37 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) {
// Decode decodes the given raw interface to the target pointer specified
// by the configuration.
func (d *Decoder) Decode(input interface{}) error {
- return d.decode("", input, reflect.ValueOf(d.config.Result).Elem())
+ err := d.decode("", input, reflect.ValueOf(d.config.Result).Elem())
+
+	// Retain some of the original behavior when multiple errors occur
+ var joinedErr interface{ Unwrap() []error }
+ if errors.As(err, &joinedErr) {
+ return fmt.Errorf("decoding failed due to the following error(s):\n\n%w", err)
+ }
+
+ return err
+}
+
+// isNil returns true if the input is nil or a typed nil pointer.
+func isNil(input interface{}) bool {
+ if input == nil {
+ return true
+ }
+ val := reflect.ValueOf(input)
+ return val.Kind() == reflect.Ptr && val.IsNil()
}
// Decodes an unknown data type into a specific reflection value.
func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error {
- var inputVal reflect.Value
- if input != nil {
- inputVal = reflect.ValueOf(input)
-
- // We need to check here if input is a typed nil. Typed nils won't
- // match the "input == nil" below so we check that here.
- if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() {
- input = nil
- }
+ var (
+ inputVal = reflect.ValueOf(input)
+ outputKind = getKind(outVal)
+ decodeNil = d.config.DecodeNil && d.cachedDecodeHook != nil
+ )
+ if isNil(input) {
+ // Typed nils won't match the "input == nil" below, so reset input.
+ input = nil
}
-
if input == nil {
// If the data is nil, then we don't set anything, unless ZeroFields is set
// to true.
@@ -440,30 +472,46 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e
d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
}
}
- return nil
+ if !decodeNil {
+ return nil
+ }
}
-
if !inputVal.IsValid() {
- // If the input value is invalid, then we just set the value
- // to be the zero value.
- outVal.Set(reflect.Zero(outVal.Type()))
- if d.config.Metadata != nil && name != "" {
- d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+ if !decodeNil {
+ // If the input value is invalid, then we just set the value
+ // to be the zero value.
+ outVal.Set(reflect.Zero(outVal.Type()))
+ if d.config.Metadata != nil && name != "" {
+ d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+ }
+ return nil
+ }
+ // Hooks need a valid inputVal, so reset it to zero value of outVal type.
+ switch outputKind {
+ case reflect.Struct, reflect.Map:
+ var mapVal map[string]interface{}
+ inputVal = reflect.ValueOf(mapVal) // create nil map pointer
+ case reflect.Slice, reflect.Array:
+ var sliceVal []interface{}
+ inputVal = reflect.ValueOf(sliceVal) // create nil slice pointer
+ default:
+ inputVal = reflect.Zero(outVal.Type())
}
- return nil
}
- if d.config.DecodeHook != nil {
+ if d.cachedDecodeHook != nil {
// We have a DecodeHook, so let's pre-process the input.
var err error
- input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal)
+ input, err = d.cachedDecodeHook(inputVal, outVal)
if err != nil {
- return fmt.Errorf("error decoding '%s': %s", name, err)
+ return fmt.Errorf("error decoding '%s': %w", name, err)
}
}
+ if isNil(input) {
+ return nil
+ }
var err error
- outputKind := getKind(outVal)
addMetaKey := true
switch outputKind {
case reflect.Bool:
@@ -478,6 +526,8 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e
err = d.decodeUint(name, input, outVal)
case reflect.Float32:
err = d.decodeFloat(name, input, outVal)
+ case reflect.Complex64:
+ err = d.decodeComplex(name, input, outVal)
case reflect.Struct:
err = d.decodeStruct(name, input, outVal)
case reflect.Map:
@@ -742,8 +792,8 @@ func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) e
}
default:
return fmt.Errorf(
- "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
- name, val.Type(), dataVal.Type(), data)
+ "'%s' expected type '%s', got unconvertible type '%#v', value: '%#v'",
+ name, val, dataVal, data)
}
return nil
@@ -796,6 +846,22 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value)
return nil
}
+func (d *Decoder) decodeComplex(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataKind := getKind(dataVal)
+
+ switch {
+ case dataKind == reflect.Complex64:
+ val.SetComplex(dataVal.Complex())
+ default:
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
+ }
+
+ return nil
+}
+
func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error {
valType := val.Type()
valKeyType := valType.Key()
@@ -811,8 +877,14 @@ func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) er
valMap = reflect.MakeMap(mapType)
}
+ dataVal := reflect.ValueOf(data)
+
+ // Resolve any levels of indirection
+ for dataVal.Kind() == reflect.Pointer {
+ dataVal = reflect.Indirect(dataVal)
+ }
+
// Check input type and based on the input type jump to the proper func
- dataVal := reflect.Indirect(reflect.ValueOf(data))
switch dataVal.Kind() {
case reflect.Map:
return d.decodeMapFromMap(name, dataVal, val, valMap)
@@ -857,7 +929,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle
valElemType := valType.Elem()
// Accumulate errors
- errors := make([]string, 0)
+ var errs []error
// If the input data is empty, then we just match what the input data is.
if dataVal.Len() == 0 {
@@ -879,7 +951,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle
// First decode the key into the proper type
currentKey := reflect.Indirect(reflect.New(valKeyType))
if err := d.decode(fieldName, k.Interface(), currentKey); err != nil {
- errors = appendErrors(errors, err)
+ errs = append(errs, err)
continue
}
@@ -887,7 +959,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle
v := dataVal.MapIndex(k).Interface()
currentVal := reflect.Indirect(reflect.New(valElemType))
if err := d.decode(fieldName, v, currentVal); err != nil {
- errors = appendErrors(errors, err)
+ errs = append(errs, err)
continue
}
@@ -897,12 +969,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle
// Set the built up map to the value
val.Set(valMap)
- // If we had errors, return those
- if len(errors) > 0 {
- return &Error{errors}
- }
-
- return nil
+ return errors.Join(errs...)
}
func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
@@ -945,7 +1012,7 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
}
// If "squash" is specified in the tag, we squash the field down.
- squash = squash || strings.Index(tagValue[index+1:], "squash") != -1
+ squash = squash || strings.Contains(tagValue[index+1:], d.config.SquashTagOption)
if squash {
// When squashing, the embedded type can be a pointer to a struct.
if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct {
@@ -956,6 +1023,18 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
if v.Kind() != reflect.Struct {
return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
}
+ } else {
+ if strings.Index(tagValue[index+1:], "remain") != -1 {
+ if v.Kind() != reflect.Map {
+ return fmt.Errorf("error remain-tag field with invalid type: '%s'", v.Type())
+ }
+
+ ptr := v.MapRange()
+ for ptr.Next() {
+ valMap.SetMapIndex(ptr.Key(), ptr.Value())
+ }
+ continue
+ }
}
if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" {
keyName = keyNameTagValue
@@ -1123,10 +1202,12 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
if valSlice.IsNil() || d.config.ZeroFields {
// Make a new slice to hold our result, same size as the original data.
valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
+ } else if valSlice.Len() > dataVal.Len() {
+ valSlice = valSlice.Slice(0, dataVal.Len())
}
// Accumulate any errors
- errors := make([]string, 0)
+ var errs []error
for i := 0; i < dataVal.Len(); i++ {
currentData := dataVal.Index(i).Interface()
@@ -1137,19 +1218,14 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
fieldName := name + "[" + strconv.Itoa(i) + "]"
if err := d.decode(fieldName, currentData, currentField); err != nil {
- errors = appendErrors(errors, err)
+ errs = append(errs, err)
}
}
// Finally, set the value to the slice we built up
val.Set(valSlice)
- // If there were errors, we return those
- if len(errors) > 0 {
- return &Error{errors}
- }
-
- return nil
+ return errors.Join(errs...)
}
func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error {
@@ -1161,7 +1237,7 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value)
valArray := val
- if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields {
+ if isComparable(valArray) && valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields {
// Check input type
if dataValKind != reflect.Array && dataValKind != reflect.Slice {
if d.config.WeaklyTypedInput {
@@ -1188,7 +1264,6 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value)
if dataVal.Len() > arrayType.Len() {
return fmt.Errorf(
"'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len())
-
}
// Make a new array to hold our result, same size as the original data.
@@ -1196,7 +1271,7 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value)
}
// Accumulate any errors
- errors := make([]string, 0)
+ var errs []error
for i := 0; i < dataVal.Len(); i++ {
currentData := dataVal.Index(i).Interface()
@@ -1204,19 +1279,14 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value)
fieldName := name + "[" + strconv.Itoa(i) + "]"
if err := d.decode(fieldName, currentData, currentField); err != nil {
- errors = appendErrors(errors, err)
+ errs = append(errs, err)
}
}
// Finally, set the value to the array we built up
val.Set(valArray)
- // If there were errors, we return those
- if len(errors) > 0 {
- return &Error{errors}
- }
-
- return nil
+ return errors.Join(errs...)
}
func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
@@ -1278,7 +1348,8 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
}
targetValKeysUnused := make(map[interface{}]struct{})
- errors := make([]string, 0)
+
+ var errs []error
// This slice will keep track of all the structs we'll be decoding.
// There can be more than one struct if there are embedded structs
@@ -1319,7 +1390,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
// We always parse the tags cause we're looking for other tags too
tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
for _, tag := range tagParts[1:] {
- if tag == "squash" {
+ if tag == d.config.SquashTagOption {
squash = true
break
}
@@ -1331,11 +1402,15 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
}
if squash {
- if fieldVal.Kind() != reflect.Struct {
- errors = appendErrors(errors,
- fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind()))
- } else {
+ switch fieldVal.Kind() {
+ case reflect.Struct:
structs = append(structs, fieldVal)
+ case reflect.Interface:
+ if !fieldVal.IsNil() {
+ structs = append(structs, fieldVal.Elem().Elem())
+ }
+ default:
+ errs = append(errs, fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind()))
}
continue
}
@@ -1356,6 +1431,9 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
fieldName := field.Name
tagValue := field.Tag.Get(d.config.TagName)
+ if tagValue == "" && d.config.IgnoreUntaggedFields {
+ continue
+ }
tagValue = strings.SplitN(tagValue, ",", 2)[0]
if tagValue != "" {
fieldName = tagValue
@@ -1409,7 +1487,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
}
if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil {
- errors = appendErrors(errors, err)
+ errs = append(errs, err)
}
}
@@ -1424,7 +1502,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
// Decode it as-if we were just decoding this map onto our map.
if err := d.decodeMap(name, remain, remainField.val); err != nil {
- errors = appendErrors(errors, err)
+ errs = append(errs, err)
}
// Set the map to nil so we have none so that the next check will
@@ -1440,7 +1518,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
sort.Strings(keys)
err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", "))
- errors = appendErrors(errors, err)
+ errs = append(errs, err)
}
if d.config.ErrorUnset && len(targetValKeysUnused) > 0 {
@@ -1451,11 +1529,11 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
sort.Strings(keys)
err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", "))
- errors = appendErrors(errors, err)
+ errs = append(errs, err)
}
- if len(errors) > 0 {
- return &Error{errors}
+ if err := errors.Join(errs...); err != nil {
+ return err
}
// Add the unused keys to the list of unused keys if we're tracking metadata
@@ -1509,6 +1587,8 @@ func getKind(val reflect.Value) reflect.Kind {
return reflect.Uint
case kind >= reflect.Float32 && kind <= reflect.Float64:
return reflect.Float32
+ case kind >= reflect.Complex64 && kind <= reflect.Complex128:
+ return reflect.Complex64
default:
return kind
}
diff --git a/test/tools/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go b/test/tools/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go
new file mode 100644
index 00000000000..d0913fff6c7
--- /dev/null
+++ b/test/tools/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go
@@ -0,0 +1,44 @@
+//go:build !go1.20
+
+package mapstructure
+
+import "reflect"
+
+func isComparable(v reflect.Value) bool {
+	k := v.Kind()
+	switch k {
+	case reflect.Invalid:
+		return false
+
+	case reflect.Array:
+		switch v.Type().Elem().Kind() {
+		case reflect.Interface, reflect.Array, reflect.Struct:
+			for i := 0; i < v.Type().Len(); i++ {
+				// if !v.Index(i).Comparable() {
+				if !isComparable(v.Index(i)) {
+					return false
+				}
+			}
+			return true
+		}
+		return v.Type().Comparable()
+
+	case reflect.Interface:
+		// return v.Elem().Comparable()
+		return isComparable(v.Elem())
+
+	case reflect.Struct:
+		// A struct is comparable only if every one of its fields is
+		// comparable (mirrors reflect.Value.Comparable from Go 1.20+).
+		for i := 0; i < v.NumField(); i++ {
+			// if !v.Field(i).Comparable() {
+			if !isComparable(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+
+	default:
+		return v.Type().Comparable()
+	}
+}
diff --git a/test/tools/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go b/test/tools/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go
new file mode 100644
index 00000000000..f8255a1b174
--- /dev/null
+++ b/test/tools/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go
@@ -0,0 +1,10 @@
+//go:build go1.20
+
+package mapstructure
+
+import "reflect"
+
+// TODO: remove once we drop support for Go <1.20
+func isComparable(v reflect.Value) bool {
+ return v.Comparable()
+}
diff --git a/test/tools/vendor/github.com/google/uuid/.travis.yml b/test/tools/vendor/github.com/google/uuid/.travis.yml
deleted file mode 100644
index d8156a60ba9..00000000000
--- a/test/tools/vendor/github.com/google/uuid/.travis.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-language: go
-
-go:
- - 1.4.3
- - 1.5.3
- - tip
-
-script:
- - go test -v ./...
diff --git a/test/tools/vendor/github.com/google/uuid/CHANGELOG.md b/test/tools/vendor/github.com/google/uuid/CHANGELOG.md
new file mode 100644
index 00000000000..7ec5ac7ea90
--- /dev/null
+++ b/test/tools/vendor/github.com/google/uuid/CHANGELOG.md
@@ -0,0 +1,41 @@
+# Changelog
+
+## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16)
+
+
+### Features
+
+* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3))
+
+
+### Bug Fixes
+
+* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06))
+* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6))
+
+## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
+
+
+### Features
+
+* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29))
+
+## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
+
+
+### Features
+
+* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4))
+
+### Fixes
+
+* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior)
+
+## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)
+
+
+### Bug Fixes
+
+* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0))
+
+## Changelog
diff --git a/test/tools/vendor/github.com/google/uuid/CONTRIBUTING.md b/test/tools/vendor/github.com/google/uuid/CONTRIBUTING.md
index 04fdf09f136..a502fdc515a 100644
--- a/test/tools/vendor/github.com/google/uuid/CONTRIBUTING.md
+++ b/test/tools/vendor/github.com/google/uuid/CONTRIBUTING.md
@@ -2,6 +2,22 @@
We definitely welcome patches and contribution to this project!
+### Tips
+
+Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org).
+
+Always try to include a test case! If it is not possible or not necessary,
+please explain why in the pull request description.
+
+### Releasing
+
+Commits that would precipitate a SemVer change, as described in the Conventional
+Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
+to create a release candidate pull request. Once submitted, `release-please`
+will create a release.
+
+For tips on how to work with `release-please`, see its documentation.
+
### Legal requirements
In order to protect both you and ourselves, you will need to sign the
diff --git a/test/tools/vendor/github.com/google/uuid/README.md b/test/tools/vendor/github.com/google/uuid/README.md
index f765a46f915..3e9a61889de 100644
--- a/test/tools/vendor/github.com/google/uuid/README.md
+++ b/test/tools/vendor/github.com/google/uuid/README.md
@@ -1,6 +1,6 @@
-# uuid 
+# uuid
The uuid package generates and inspects UUIDs based on
-[RFC 4122](http://tools.ietf.org/html/rfc4122)
+[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122)
and DCE 1.1: Authentication and Security Services.
This package is based on the github.com/pborman/uuid package (previously named
@@ -9,10 +9,12 @@ a UUID is a 16 byte array rather than a byte slice. One loss due to this
change is the ability to represent an invalid UUID (vs a NIL UUID).
###### Install
-`go get github.com/google/uuid`
+```sh
+go get github.com/google/uuid
+```
###### Documentation
-[](http://godoc.org/github.com/google/uuid)
+[](https://pkg.go.dev/github.com/google/uuid)
Full `go doc` style documentation for the package can be viewed online without
installing this package by using the GoDoc site here:
diff --git a/test/tools/vendor/github.com/google/uuid/hash.go b/test/tools/vendor/github.com/google/uuid/hash.go
index b404f4bec27..dc60082d3b3 100644
--- a/test/tools/vendor/github.com/google/uuid/hash.go
+++ b/test/tools/vendor/github.com/google/uuid/hash.go
@@ -17,6 +17,12 @@ var (
NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
Nil UUID // empty UUID, all zeros
+
+	// The Max UUID is a special form of UUID that is specified to have all 128 bits set to 1.
+ Max = UUID{
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ }
)
// NewHash returns a new UUID derived from the hash of space concatenated with
diff --git a/test/tools/vendor/github.com/google/uuid/node_js.go b/test/tools/vendor/github.com/google/uuid/node_js.go
index 24b78edc907..b2a0bc8711b 100644
--- a/test/tools/vendor/github.com/google/uuid/node_js.go
+++ b/test/tools/vendor/github.com/google/uuid/node_js.go
@@ -7,6 +7,6 @@
package uuid
// getHardwareInterface returns nil values for the JS version of the code.
-// This remvoves the "net" dependency, because it is not used in the browser.
+// This removes the "net" dependency, because it is not used in the browser.
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
func getHardwareInterface(name string) (string, []byte) { return "", nil }
diff --git a/test/tools/vendor/github.com/google/uuid/time.go b/test/tools/vendor/github.com/google/uuid/time.go
index e6ef06cdc87..c351129279f 100644
--- a/test/tools/vendor/github.com/google/uuid/time.go
+++ b/test/tools/vendor/github.com/google/uuid/time.go
@@ -108,12 +108,23 @@ func setClockSequence(seq int) {
}
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
-// uuid. The time is only defined for version 1 and 2 UUIDs.
+// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs.
func (uuid UUID) Time() Time {
- time := int64(binary.BigEndian.Uint32(uuid[0:4]))
- time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
- time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
- return Time(time)
+ var t Time
+ switch uuid.Version() {
+ case 6:
+ time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110
+ t = Time(time)
+ case 7:
+ time := binary.BigEndian.Uint64(uuid[:8])
+ t = Time((time>>16)*10000 + g1582ns100)
+ default: // forward compatible
+ time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+ time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+ time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+ t = Time(time)
+ }
+ return t
}
// ClockSequence returns the clock sequence encoded in uuid.
diff --git a/test/tools/vendor/github.com/google/uuid/uuid.go b/test/tools/vendor/github.com/google/uuid/uuid.go
index a57207aeb6f..5232b486780 100644
--- a/test/tools/vendor/github.com/google/uuid/uuid.go
+++ b/test/tools/vendor/github.com/google/uuid/uuid.go
@@ -56,11 +56,15 @@ func IsInvalidLengthError(err error) bool {
return ok
}
-// Parse decodes s into a UUID or returns an error. Both the standard UUID
-// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
-// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
-// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
-// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
+// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both
+// the standard UUID forms defined in RFC 4122
+// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition,
+// Parse accepts non-standard strings such as the raw hex encoding
+// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings,
+// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are
+// examined in the latter case. Parse should not be used to validate strings as
+// it parses non-standard encodings as indicated above.
func Parse(s string) (UUID, error) {
var uuid UUID
switch len(s) {
@@ -69,7 +73,7 @@ func Parse(s string) (UUID, error) {
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9:
- if strings.ToLower(s[:9]) != "urn:uuid:" {
+ if !strings.EqualFold(s[:9], "urn:uuid:") {
return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
}
s = s[9:]
@@ -101,7 +105,8 @@ func Parse(s string) (UUID, error) {
9, 11,
14, 16,
19, 21,
- 24, 26, 28, 30, 32, 34} {
+ 24, 26, 28, 30, 32, 34,
+ } {
v, ok := xtob(s[x], s[x+1])
if !ok {
return uuid, errors.New("invalid UUID format")
@@ -117,7 +122,7 @@ func ParseBytes(b []byte) (UUID, error) {
switch len(b) {
case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
+ if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) {
return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
}
b = b[9:]
@@ -145,7 +150,8 @@ func ParseBytes(b []byte) (UUID, error) {
9, 11,
14, 16,
19, 21,
- 24, 26, 28, 30, 32, 34} {
+ 24, 26, 28, 30, 32, 34,
+ } {
v, ok := xtob(b[x], b[x+1])
if !ok {
return uuid, errors.New("invalid UUID format")
@@ -180,6 +186,59 @@ func Must(uuid UUID, err error) UUID {
return uuid
}
+// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
+//   xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+//   urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+//   xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+//   {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+// It returns an error if the format is invalid, otherwise nil.
+func Validate(s string) error {
+	switch len(s) {
+	// Standard UUID format
+	case 36:
+
+	// UUID with "urn:uuid:" prefix
+	case 36 + 9:
+		if !strings.EqualFold(s[:9], "urn:uuid:") {
+			return fmt.Errorf("invalid urn prefix: %q", s[:9])
+		}
+		s = s[9:]
+
+	// UUID enclosed in braces
+	case 36 + 2:
+		if s[0] != '{' || s[len(s)-1] != '}' {
+			return errors.New("invalid bracketed UUID format")
+		}
+		s = s[1 : len(s)-1]
+
+	// UUID without hyphens
+	case 32:
+		for i := 0; i < len(s); i += 2 {
+			_, ok := xtob(s[i], s[i+1])
+			if !ok {
+				return errors.New("invalid UUID format")
+			}
+		}
+
+	default:
+		return invalidLengthError{len(s)}
+	}
+
+	// Check for standard UUID format
+	if len(s) == 36 {
+		if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+			return errors.New("invalid UUID format")
+		}
+		for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
+			if _, ok := xtob(s[x], s[x+1]); !ok {
+				return errors.New("invalid UUID format")
+			}
+		}
+	}
+
+	return nil
+}
+
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// , or "" if uuid is invalid.
func (uuid UUID) String() string {
@@ -292,3 +351,15 @@ func DisableRandPool() {
poolMu.Lock()
poolPos = randPoolSize
}
+
+// UUIDs is a slice of UUID types.
+type UUIDs []UUID
+
+// Strings returns a string slice containing the string form of each UUID in uuids.
+func (uuids UUIDs) Strings() []string {
+	out := make([]string, len(uuids))
+	for i, id := range uuids {
+		out[i] = id.String()
+	}
+	return out
+}
diff --git a/test/tools/vendor/github.com/google/uuid/version6.go b/test/tools/vendor/github.com/google/uuid/version6.go
new file mode 100644
index 00000000000..339a959a7a2
--- /dev/null
+++ b/test/tools/vendor/github.com/google/uuid/version6.go
@@ -0,0 +1,56 @@
+// Copyright 2023 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "encoding/binary"
+
+// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
+// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
+// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
+//
+// NewV6 returns a Version 6 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set, NewV6 sets the NodeID to random bits automatically. If clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewV6 returns Nil and an error.
+func NewV6() (UUID, error) {
+	var uuid UUID
+	now, seq, err := GetTime()
+	if err != nil {
+		return uuid, err
+	}
+
+	/*
+	    0                   1                   2                   3
+	    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |                           time_high                           |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |           time_mid            |      time_low_and_version     |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |clk_seq_hi_res |  clk_seq_low  |         node (0-1)            |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |                         node (2-5)                            |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	*/
+
+	binary.BigEndian.PutUint64(uuid[0:], uint64(now))
+	binary.BigEndian.PutUint16(uuid[8:], seq)
+
+	uuid[6] = 0x60 | (uuid[6] & 0x0F)
+	uuid[8] = 0x80 | (uuid[8] & 0x3F)
+
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	copy(uuid[10:], nodeID[:])
+	nodeMu.Unlock()
+
+	return uuid, nil
+}
diff --git a/test/tools/vendor/github.com/google/uuid/version7.go b/test/tools/vendor/github.com/google/uuid/version7.go
new file mode 100644
index 00000000000..3167b643d45
--- /dev/null
+++ b/test/tools/vendor/github.com/google/uuid/version7.go
@@ -0,0 +1,104 @@
+// Copyright 2023 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "io"
+)
+
+// UUID version 7 features a time-ordered value field derived from the widely
+// implemented and well known Unix Epoch timestamp source,
+// the number of milliseconds since midnight 1 Jan 1970 UTC, leap seconds excluded.
+// As well as improved entropy characteristics over versions 1 or 6.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
+//
+// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible.
+//
+// NewV7 returns a Version 7 UUID based on the current time (Unix Epoch).
+// Uses the randomness pool if it was enabled with EnableRandPool.
+// On error, NewV7 returns Nil and an error.
+func NewV7() (UUID, error) {
+	uuid, err := NewRandom()
+	if err != nil {
+		return uuid, err
+	}
+	makeV7(uuid[:])
+	return uuid, nil
+}
+
+// NewV7FromReader returns a Version 7 UUID based on the current time (Unix Epoch).
+// It uses NewRandomFromReader to fill the random bits.
+// On error, NewV7FromReader returns Nil and an error.
+func NewV7FromReader(r io.Reader) (UUID, error) {
+	uuid, err := NewRandomFromReader(r)
+	if err != nil {
+		return uuid, err
+	}
+
+	makeV7(uuid[:])
+	return uuid, nil
+}
+
+// makeV7 fills in the 48-bit time (uuid[0] - uuid[5]) and sets version b0111 (uuid[6]).
+// uuid[8] already has the right variant bits (10) set by NewRandom.
+// see function NewV7 and NewV7FromReader
+func makeV7(uuid []byte) {
+	/*
+		 0                   1                   2                   3
+		 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|                           unix_ts_ms                          |
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|          unix_ts_ms           |  ver  |  rand_a (12 bit seq)  |
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|var|                        rand_b                             |
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|                            rand_b                             |
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	*/
+	_ = uuid[15] // bounds check
+
+	t, s := getV7Time()
+
+	uuid[0] = byte(t >> 40)
+	uuid[1] = byte(t >> 32)
+	uuid[2] = byte(t >> 24)
+	uuid[3] = byte(t >> 16)
+	uuid[4] = byte(t >> 8)
+	uuid[5] = byte(t)
+
+	uuid[6] = 0x70 | (0x0F & byte(s>>8))
+	uuid[7] = byte(s)
+}
+
+// lastV7time is the last time we returned stored as:
+//
+//	52 bits of time in milliseconds since epoch
+//	12 bits of (fractional nanoseconds) >> 8
+var lastV7time int64
+
+const nanoPerMilli = 1000000
+
+// getV7Time returns the time in milliseconds and nanoseconds / 256.
+// The returned (milli << 12 + seq) is guaranteed to be greater than
+// (milli << 12 + seq) returned by any previous call to getV7Time.
+func getV7Time() (milli, seq int64) {
+	timeMu.Lock()
+	defer timeMu.Unlock()
+
+	nano := timeNow().UnixNano()
+	milli = nano / nanoPerMilli
+	// Sequence number is between 0 and 3906 (nanoPerMilli>>8)
+	seq = (nano - milli*nanoPerMilli) >> 8
+	now := milli<<12 + seq
+	if now <= lastV7time {
+		now = lastV7time + 1
+		milli = now >> 12
+		seq = now & 0xfff
+	}
+	lastV7time = now
+	return milli, seq
+}
diff --git a/test/tools/vendor/github.com/gorilla/handlers/.editorconfig b/test/tools/vendor/github.com/gorilla/handlers/.editorconfig
new file mode 100644
index 00000000000..c6b74c3e0d0
--- /dev/null
+++ b/test/tools/vendor/github.com/gorilla/handlers/.editorconfig
@@ -0,0 +1,20 @@
+; https://editorconfig.org/
+
+root = true
+
+[*]
+insert_final_newline = true
+charset = utf-8
+trim_trailing_whitespace = true
+indent_style = space
+indent_size = 2
+
+[{Makefile,go.mod,go.sum,*.go,.gitmodules}]
+indent_style = tab
+indent_size = 4
+
+[*.md]
+indent_size = 4
+trim_trailing_whitespace = false
+
+eclint_indent_style = unset
\ No newline at end of file
diff --git a/test/tools/vendor/github.com/gorilla/handlers/.gitignore b/test/tools/vendor/github.com/gorilla/handlers/.gitignore
new file mode 100644
index 00000000000..577a89e8138
--- /dev/null
+++ b/test/tools/vendor/github.com/gorilla/handlers/.gitignore
@@ -0,0 +1,2 @@
+# Output of the go test coverage tool
+coverage.coverprofile
diff --git a/test/tools/vendor/github.com/gorilla/handlers/LICENSE b/test/tools/vendor/github.com/gorilla/handlers/LICENSE
index 66ea3c8ae71..bb9d80bc9b6 100644
--- a/test/tools/vendor/github.com/gorilla/handlers/LICENSE
+++ b/test/tools/vendor/github.com/gorilla/handlers/LICENSE
@@ -1,22 +1,27 @@
-Copyright (c) 2013 The Gorilla Handlers Authors. All rights reserved.
+Copyright (c) 2023 The Gorilla Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
+modification, are permitted provided that the following conditions are
+met:
- Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
- Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/test/tools/vendor/github.com/gorilla/handlers/Makefile b/test/tools/vendor/github.com/gorilla/handlers/Makefile
new file mode 100644
index 00000000000..003b784f7ed
--- /dev/null
+++ b/test/tools/vendor/github.com/gorilla/handlers/Makefile
@@ -0,0 +1,34 @@
+GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '')
+GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest
+
+GO_SEC=$(shell which gosec 2> /dev/null || echo '')
+GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest
+
+GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '')
+GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest
+
+.PHONY: verify
+verify: sec govulncheck lint test
+
+.PHONY: lint
+lint:
+ $(if $(GO_LINT), ,go install $(GO_LINT_URI))
+ @echo "##### Running golangci-lint #####"
+ golangci-lint run -v
+
+.PHONY: sec
+sec:
+ $(if $(GO_SEC), ,go install $(GO_SEC_URI))
+ @echo "##### Running gosec #####"
+ gosec ./...
+
+.PHONY: govulncheck
+govulncheck:
+ $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI))
+ @echo "##### Running govulncheck #####"
+ govulncheck ./...
+
+.PHONY: test
+test:
+ @echo "##### Running tests #####"
+ go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./...
diff --git a/test/tools/vendor/github.com/gorilla/handlers/README.md b/test/tools/vendor/github.com/gorilla/handlers/README.md
index 6eba66bf302..02555b2642c 100644
--- a/test/tools/vendor/github.com/gorilla/handlers/README.md
+++ b/test/tools/vendor/github.com/gorilla/handlers/README.md
@@ -1,10 +1,10 @@
-gorilla/handlers
-================
+# gorilla/handlers
+
+
+[](https://codecov.io/github/gorilla/handlers)
[](https://godoc.org/github.com/gorilla/handlers)
-[](https://circleci.com/gh/gorilla/handlers)
[](https://sourcegraph.com/github.com/gorilla/handlers?badge)
-
Package handlers is a collection of handlers (aka "HTTP middleware") for use
with Go's `net/http` package (or any framework supporting `http.Handler`), including:
diff --git a/test/tools/vendor/github.com/gorilla/handlers/canonical.go b/test/tools/vendor/github.com/gorilla/handlers/canonical.go
index 8437fefc1ef..7121f5307be 100644
--- a/test/tools/vendor/github.com/gorilla/handlers/canonical.go
+++ b/test/tools/vendor/github.com/gorilla/handlers/canonical.go
@@ -21,12 +21,11 @@ type canonical struct {
//
// Example:
//
-// r := mux.NewRouter()
-// canonical := handlers.CanonicalHost("http://www.gorillatoolkit.org", 302)
-// r.HandleFunc("/route", YourHandler)
-//
-// log.Fatal(http.ListenAndServe(":7000", canonical(r)))
+// r := mux.NewRouter()
+// canonical := handlers.CanonicalHost("http://www.gorillatoolkit.org", 302)
+// r.HandleFunc("/route", YourHandler)
//
+// log.Fatal(http.ListenAndServe(":7000", canonical(r)))
func CanonicalHost(domain string, code int) func(h http.Handler) http.Handler {
fn := func(h http.Handler) http.Handler {
return canonical{h, domain, code}
diff --git a/test/tools/vendor/github.com/gorilla/handlers/compress.go b/test/tools/vendor/github.com/gorilla/handlers/compress.go
index 1e95f1ccbfa..d6f589503b5 100644
--- a/test/tools/vendor/github.com/gorilla/handlers/compress.go
+++ b/test/tools/vendor/github.com/gorilla/handlers/compress.go
@@ -44,13 +44,13 @@ type flusher interface {
Flush() error
}
-func (w *compressResponseWriter) Flush() {
+func (cw *compressResponseWriter) Flush() {
// Flush compressed data if compressor supports it.
- if f, ok := w.compressor.(flusher); ok {
- f.Flush()
+ if f, ok := cw.compressor.(flusher); ok {
+ _ = f.Flush()
}
// Flush HTTP response.
- if f, ok := w.w.(http.Flusher); ok {
+ if f, ok := cw.w.(http.Flusher); ok {
f.Flush()
}
}
diff --git a/test/tools/vendor/github.com/gorilla/handlers/cors.go b/test/tools/vendor/github.com/gorilla/handlers/cors.go
index 0dcdffb3d32..8af9c096e5e 100644
--- a/test/tools/vendor/github.com/gorilla/handlers/cors.go
+++ b/test/tools/vendor/github.com/gorilla/handlers/cors.go
@@ -26,14 +26,14 @@ type cors struct {
type OriginValidator func(string) bool
var (
- defaultCorsOptionStatusCode = 200
- defaultCorsMethods = []string{"GET", "HEAD", "POST"}
+ defaultCorsOptionStatusCode = http.StatusOK
+ defaultCorsMethods = []string{http.MethodGet, http.MethodHead, http.MethodPost}
defaultCorsHeaders = []string{"Accept", "Accept-Language", "Content-Language", "Origin"}
- // (WebKit/Safari v9 sends the Origin header by default in AJAX requests)
+ // (WebKit/Safari v9 sends the Origin header by default in AJAX requests).
)
const (
- corsOptionMethod string = "OPTIONS"
+ corsOptionMethod string = http.MethodOptions
corsAllowOriginHeader string = "Access-Control-Allow-Origin"
corsExposeHeadersHeader string = "Access-Control-Expose-Headers"
corsMaxAgeHeader string = "Access-Control-Max-Age"
@@ -101,10 +101,8 @@ func (ch *cors) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if !ch.isMatch(method, defaultCorsMethods) {
w.Header().Set(corsAllowMethodsHeader, method)
}
- } else {
- if len(ch.exposedHeaders) > 0 {
- w.Header().Set(corsExposeHeadersHeader, strings.Join(ch.exposedHeaders, ","))
- }
+ } else if len(ch.exposedHeaders) > 0 {
+ w.Header().Set(corsExposeHeadersHeader, strings.Join(ch.exposedHeaders, ","))
}
if ch.allowCredentials {
@@ -141,22 +139,21 @@ func (ch *cors) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// CORS provides Cross-Origin Resource Sharing middleware.
// Example:
//
-// import (
-// "net/http"
-//
-// "github.com/gorilla/handlers"
-// "github.com/gorilla/mux"
-// )
+// import (
+// "net/http"
//
-// func main() {
-// r := mux.NewRouter()
-// r.HandleFunc("/users", UserEndpoint)
-// r.HandleFunc("/projects", ProjectEndpoint)
+// "github.com/gorilla/handlers"
+// "github.com/gorilla/mux"
+// )
//
-// // Apply the CORS middleware to our top-level router, with the defaults.
-// http.ListenAndServe(":8000", handlers.CORS()(r))
-// }
+// func main() {
+// r := mux.NewRouter()
+// r.HandleFunc("/users", UserEndpoint)
+// r.HandleFunc("/projects", ProjectEndpoint)
//
+// // Apply the CORS middleware to our top-level router, with the defaults.
+// http.ListenAndServe(":8000", handlers.CORS()(r))
+// }
func CORS(opts ...CORSOption) func(http.Handler) http.Handler {
return func(h http.Handler) http.Handler {
ch := parseCORSOptions(opts...)
@@ -174,7 +171,7 @@ func parseCORSOptions(opts ...CORSOption) *cors {
}
for _, option := range opts {
- option(ch)
+ _ = option(ch) //TODO: @bharat-rajani, return error to caller if not nil?
}
return ch
diff --git a/test/tools/vendor/github.com/gorilla/handlers/handlers.go b/test/tools/vendor/github.com/gorilla/handlers/handlers.go
index 0509482ad7a..9b92fce3333 100644
--- a/test/tools/vendor/github.com/gorilla/handlers/handlers.go
+++ b/test/tools/vendor/github.com/gorilla/handlers/handlers.go
@@ -35,7 +35,7 @@ func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
}
sort.Strings(allow)
w.Header().Set("Allow", strings.Join(allow, ", "))
- if req.Method == "OPTIONS" {
+ if req.Method == http.MethodOptions {
w.WriteHeader(http.StatusOK)
} else {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
@@ -44,7 +44,7 @@ func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
}
// responseLogger is wrapper of http.ResponseWriter that keeps track of its HTTP
-// status code and body size
+// status code and body size.
type responseLogger struct {
w http.ResponseWriter
status int
@@ -97,7 +97,7 @@ func isContentType(h http.Header, contentType string) bool {
// Only PUT, POST, and PATCH requests are considered.
func ContentTypeHandler(h http.Handler, contentTypes ...string) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if !(r.Method == "PUT" || r.Method == "POST" || r.Method == "PATCH") {
+ if !(r.Method == http.MethodPut || r.Method == http.MethodPost || r.Method == http.MethodPatch) {
h.ServeHTTP(w, r)
return
}
@@ -108,7 +108,10 @@ func ContentTypeHandler(h http.Handler, contentTypes ...string) http.Handler {
return
}
}
- http.Error(w, fmt.Sprintf("Unsupported content type %q; expected one of %q", r.Header.Get("Content-Type"), contentTypes), http.StatusUnsupportedMediaType)
+ http.Error(w, fmt.Sprintf("Unsupported content type %q; expected one of %q",
+ r.Header.Get("Content-Type"),
+ contentTypes),
+ http.StatusUnsupportedMediaType)
})
}
@@ -133,12 +136,12 @@ const (
// Form method takes precedence over header method.
func HTTPMethodOverrideHandler(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if r.Method == "POST" {
+ if r.Method == http.MethodPost {
om := r.FormValue(HTTPMethodOverrideFormKey)
if om == "" {
om = r.Header.Get(HTTPMethodOverrideHeader)
}
- if om == "PUT" || om == "PATCH" || om == "DELETE" {
+ if om == http.MethodPut || om == http.MethodPatch || om == http.MethodDelete {
r.Method = om
}
}
diff --git a/test/tools/vendor/github.com/gorilla/handlers/logging.go b/test/tools/vendor/github.com/gorilla/handlers/logging.go
index 228465eba00..2badb6fbff8 100644
--- a/test/tools/vendor/github.com/gorilla/handlers/logging.go
+++ b/test/tools/vendor/github.com/gorilla/handlers/logging.go
@@ -18,7 +18,7 @@ import (
// Logging
-// LogFormatterParams is the structure any formatter will be handed when time to log comes
+// LogFormatterParams is the structure any formatter will be handed when time to log comes.
type LogFormatterParams struct {
Request *http.Request
URL url.URL
@@ -27,7 +27,7 @@ type LogFormatterParams struct {
Size int
}
-// LogFormatter gives the signature of the formatter function passed to CustomLoggingHandler
+// LogFormatter gives the signature of the formatter function passed to CustomLoggingHandler.
type LogFormatter func(writer io.Writer, params LogFormatterParams)
// loggingHandler is the http.Handler implementation for LoggingHandlerTo and its
@@ -46,7 +46,10 @@ func (h loggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
h.handler.ServeHTTP(w, req)
if req.MultipartForm != nil {
- req.MultipartForm.RemoveAll()
+ err := req.MultipartForm.RemoveAll()
+ if err != nil {
+ return
+ }
}
params := LogFormatterParams{
@@ -76,7 +79,7 @@ const lowerhex = "0123456789abcdef"
func appendQuoted(buf []byte, s string) []byte {
var runeTmp [utf8.UTFMax]byte
- for width := 0; len(s) > 0; s = s[width:] {
+ for width := 0; len(s) > 0; s = s[width:] { //nolint: wastedassign //TODO: why width starts from 0and reassigned as 1
r := rune(s[0])
width = 1
if r >= utf8.RuneSelf {
@@ -191,7 +194,7 @@ func buildCommonLogLine(req *http.Request, url url.URL, ts time.Time, status int
func writeLog(writer io.Writer, params LogFormatterParams) {
buf := buildCommonLogLine(params.Request, params.URL, params.TimeStamp, params.StatusCode, params.Size)
buf = append(buf, '\n')
- writer.Write(buf)
+ _, _ = writer.Write(buf)
}
// writeCombinedLog writes a log entry for req to w in Apache Combined Log Format.
@@ -204,7 +207,7 @@ func writeCombinedLog(writer io.Writer, params LogFormatterParams) {
buf = append(buf, `" "`...)
buf = appendQuoted(buf, params.Request.UserAgent())
buf = append(buf, '"', '\n')
- writer.Write(buf)
+ _, _ = writer.Write(buf)
}
// CombinedLoggingHandler return a http.Handler that wraps h and logs requests to out in
@@ -212,7 +215,7 @@ func writeCombinedLog(writer io.Writer, params LogFormatterParams) {
//
// See http://httpd.apache.org/docs/2.2/logs.html#combined for a description of this format.
//
-// LoggingHandler always sets the ident field of the log to -
+// LoggingHandler always sets the ident field of the log to -.
func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler {
return loggingHandler{out, h, writeCombinedLog}
}
@@ -226,19 +229,18 @@ func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler {
//
// Example:
//
-// r := mux.NewRouter()
-// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
-// w.Write([]byte("This is a catch-all route"))
-// })
-// loggedRouter := handlers.LoggingHandler(os.Stdout, r)
-// http.ListenAndServe(":1123", loggedRouter)
-//
+// r := mux.NewRouter()
+// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+// w.Write([]byte("This is a catch-all route"))
+// })
+// loggedRouter := handlers.LoggingHandler(os.Stdout, r)
+// http.ListenAndServe(":1123", loggedRouter)
func LoggingHandler(out io.Writer, h http.Handler) http.Handler {
return loggingHandler{out, h, writeLog}
}
// CustomLoggingHandler provides a way to supply a custom log formatter
-// while taking advantage of the mechanisms in this package
+// while taking advantage of the mechanisms in this package.
func CustomLoggingHandler(out io.Writer, h http.Handler, f LogFormatter) http.Handler {
return loggingHandler{out, h, f}
}
diff --git a/test/tools/vendor/github.com/gorilla/handlers/proxy_headers.go b/test/tools/vendor/github.com/gorilla/handlers/proxy_headers.go
index ed939dcef5d..281d753e95a 100644
--- a/test/tools/vendor/github.com/gorilla/handlers/proxy_headers.go
+++ b/test/tools/vendor/github.com/gorilla/handlers/proxy_headers.go
@@ -18,7 +18,7 @@ var (
var (
// RFC7239 defines a new "Forwarded: " header designed to replace the
// existing use of X-Forwarded-* headers.
- // e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43
+ // e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43.
forwarded = http.CanonicalHeaderKey("Forwarded")
// Allows for a sub-match of the first value after 'for=' to the next
// comma, semi-colon or space. The match is case-insensitive.
@@ -67,7 +67,9 @@ func ProxyHeaders(h http.Handler) http.Handler {
func getIP(r *http.Request) string {
var addr string
- if fwd := r.Header.Get(xForwardedFor); fwd != "" {
+ switch {
+ case r.Header.Get(xForwardedFor) != "":
+ fwd := r.Header.Get(xForwardedFor)
// Only grab the first (client) address. Note that '192.168.0.1,
// 10.1.1.1' is a valid key for X-Forwarded-For where addresses after
// the first may represent forwarding proxies earlier in the chain.
@@ -76,17 +78,15 @@ func getIP(r *http.Request) string {
s = len(fwd)
}
addr = fwd[:s]
- } else if fwd := r.Header.Get(xRealIP); fwd != "" {
- // X-Real-IP should only contain one IP address (the client making the
- // request).
- addr = fwd
- } else if fwd := r.Header.Get(forwarded); fwd != "" {
+ case r.Header.Get(xRealIP) != "":
+ addr = r.Header.Get(xRealIP)
+ case r.Header.Get(forwarded) != "":
// match should contain at least two elements if the protocol was
// specified in the Forwarded header. The first element will always be
// the 'for=' capture, which we ignore. In the case of multiple IP
// addresses (for=8.8.8.8, 8.8.4.4,172.16.1.20 is valid) we only
// extract the first, which should be the client IP.
- if match := forRegex.FindStringSubmatch(fwd); len(match) > 1 {
+ if match := forRegex.FindStringSubmatch(r.Header.Get(forwarded)); len(match) > 1 {
// IPv6 addresses in Forwarded headers are quoted-strings. We strip
// these quotes.
addr = strings.Trim(match[1], `"`)
diff --git a/test/tools/vendor/github.com/gorilla/handlers/recovery.go b/test/tools/vendor/github.com/gorilla/handlers/recovery.go
index 4c4c1d9c6ce..0d4f955ecbd 100644
--- a/test/tools/vendor/github.com/gorilla/handlers/recovery.go
+++ b/test/tools/vendor/github.com/gorilla/handlers/recovery.go
@@ -36,12 +36,12 @@ func parseRecoveryOptions(h http.Handler, opts ...RecoveryOption) http.Handler {
//
// Example:
//
-// r := mux.NewRouter()
-// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
-// panic("Unexpected error!")
-// })
+// r := mux.NewRouter()
+// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+// panic("Unexpected error!")
+// })
//
-// http.ListenAndServe(":1123", handlers.RecoveryHandler()(r))
+// http.ListenAndServe(":1123", handlers.RecoveryHandler()(r))
func RecoveryHandler(opts ...RecoveryOption) func(h http.Handler) http.Handler {
return func(h http.Handler) http.Handler {
r := &recoveryHandler{handler: h}
@@ -50,20 +50,22 @@ func RecoveryHandler(opts ...RecoveryOption) func(h http.Handler) http.Handler {
}
// RecoveryLogger is a functional option to override
-// the default logger
+// the default logger.
func RecoveryLogger(logger RecoveryHandlerLogger) RecoveryOption {
return func(h http.Handler) {
- r := h.(*recoveryHandler)
+ r := h.(*recoveryHandler) //nolint:errcheck //TODO:
+ // @bharat-rajani should return type-assertion error but would break the API?
r.logger = logger
}
}
// PrintRecoveryStack is a functional option to enable
// or disable printing stack traces on panic.
-func PrintRecoveryStack(print bool) RecoveryOption {
+func PrintRecoveryStack(shouldPrint bool) RecoveryOption {
return func(h http.Handler) {
- r := h.(*recoveryHandler)
- r.printStack = print
+ r := h.(*recoveryHandler) //nolint:errcheck //TODO:
+ // @bharat-rajani should return type-assertion error but would break the API?
+ r.printStack = shouldPrint
}
}
diff --git a/test/tools/vendor/github.com/huandu/xstrings/convert.go b/test/tools/vendor/github.com/huandu/xstrings/convert.go
index 151c3151d9c..cba0d072520 100644
--- a/test/tools/vendor/github.com/huandu/xstrings/convert.go
+++ b/test/tools/vendor/github.com/huandu/xstrings/convert.go
@@ -12,11 +12,12 @@ import (
// ToCamelCase is to convert words separated by space, underscore and hyphen to camel case.
//
// Some samples.
-// "some_words" => "SomeWords"
-// "http_server" => "HttpServer"
-// "no_https" => "NoHttps"
-// "_complex__case_" => "_Complex_Case_"
-// "some words" => "SomeWords"
+//
+// "some_words" => "SomeWords"
+// "http_server" => "HttpServer"
+// "no_https" => "NoHttps"
+// "_complex__case_" => "_Complex_Case_"
+// "some words" => "SomeWords"
func ToCamelCase(str string) string {
if len(str) == 0 {
return ""
@@ -61,7 +62,6 @@ func ToCamelCase(str string) string {
if isConnector(r1) {
r0 = unicode.ToUpper(r0)
} else {
- r0 = unicode.ToLower(r0)
buf.WriteRune(r1)
}
}
@@ -74,16 +74,17 @@ func ToCamelCase(str string) string {
// snake case format.
//
// Some samples.
-// "FirstName" => "first_name"
-// "HTTPServer" => "http_server"
-// "NoHTTPS" => "no_https"
-// "GO_PATH" => "go_path"
-// "GO PATH" => "go_path" // space is converted to underscore.
-// "GO-PATH" => "go_path" // hyphen is converted to underscore.
-// "http2xx" => "http_2xx" // insert an underscore before a number and after an alphabet.
-// "HTTP20xOK" => "http_20x_ok"
-// "Duration2m3s" => "duration_2m3s"
-// "Bld4Floor3rd" => "bld4_floor_3rd"
+//
+// "FirstName" => "first_name"
+// "HTTPServer" => "http_server"
+// "NoHTTPS" => "no_https"
+// "GO_PATH" => "go_path"
+// "GO PATH" => "go_path" // space is converted to underscore.
+// "GO-PATH" => "go_path" // hyphen is converted to underscore.
+// "http2xx" => "http_2xx" // insert an underscore before a number and after an alphabet.
+// "HTTP20xOK" => "http_20x_ok"
+// "Duration2m3s" => "duration_2m3s"
+// "Bld4Floor3rd" => "bld4_floor_3rd"
func ToSnakeCase(str string) string {
return camelCaseToLowerCase(str, '_')
}
@@ -92,16 +93,17 @@ func ToSnakeCase(str string) string {
// kebab case format.
//
// Some samples.
-// "FirstName" => "first-name"
-// "HTTPServer" => "http-server"
-// "NoHTTPS" => "no-https"
-// "GO_PATH" => "go-path"
-// "GO PATH" => "go-path" // space is converted to '-'.
-// "GO-PATH" => "go-path" // hyphen is converted to '-'.
-// "http2xx" => "http-2xx" // insert an underscore before a number and after an alphabet.
-// "HTTP20xOK" => "http-20x-ok"
-// "Duration2m3s" => "duration-2m3s"
-// "Bld4Floor3rd" => "bld4-floor-3rd"
+//
+// "FirstName" => "first-name"
+// "HTTPServer" => "http-server"
+// "NoHTTPS" => "no-https"
+// "GO_PATH" => "go-path"
+// "GO PATH" => "go-path" // space is converted to '-'.
+// "GO-PATH" => "go-path" // hyphen is converted to '-'.
+// "http2xx" => "http-2xx" // insert an underscore before a number and after an alphabet.
+// "HTTP20xOK" => "http-20x-ok"
+// "Duration2m3s" => "duration-2m3s"
+// "Bld4Floor3rd" => "bld4-floor-3rd"
func ToKebabCase(str string) string {
return camelCaseToLowerCase(str, '-')
}
@@ -510,17 +512,18 @@ func ShuffleSource(str string, src rand.Source) string {
// regardless whether the result is a valid rune or not.
//
// Only following characters are alphanumeric.
-// * a - z
-// * A - Z
-// * 0 - 9
+// - a - z
+// - A - Z
+// - 0 - 9
//
// Samples (borrowed from ruby's String#succ document):
-// "abcd" => "abce"
-// "THX1138" => "THX1139"
-// "<>" => "<>"
-// "1999zzz" => "2000aaa"
-// "ZZZ9999" => "AAAA0000"
-// "***" => "**+"
+//
+// "abcd" => "abce"
+// "THX1138" => "THX1139"
+// "<>" => "<>"
+// "1999zzz" => "2000aaa"
+// "ZZZ9999" => "AAAA0000"
+// "***" => "**+"
func Successor(str string) string {
if str == "" {
return str
diff --git a/test/tools/vendor/github.com/huandu/xstrings/format.go b/test/tools/vendor/github.com/huandu/xstrings/format.go
index 8cd76c525cc..b32219bbd58 100644
--- a/test/tools/vendor/github.com/huandu/xstrings/format.go
+++ b/test/tools/vendor/github.com/huandu/xstrings/format.go
@@ -17,9 +17,10 @@ import (
// If tabSize <= 0, ExpandTabs panics with error.
//
// Samples:
-// ExpandTabs("a\tbc\tdef\tghij\tk", 4) => "a bc def ghij k"
-// ExpandTabs("abcdefg\thij\nk\tl", 4) => "abcdefg hij\nk l"
-// ExpandTabs("z中\t文\tw", 4) => "z中 文 w"
+//
+// ExpandTabs("a\tbc\tdef\tghij\tk", 4) => "a bc def ghij k"
+// ExpandTabs("abcdefg\thij\nk\tl", 4) => "abcdefg hij\nk l"
+// ExpandTabs("z中\t文\tw", 4) => "z中 文 w"
func ExpandTabs(str string, tabSize int) string {
if tabSize <= 0 {
panic("tab size must be positive")
@@ -74,9 +75,10 @@ func ExpandTabs(str string, tabSize int) string {
// If pad is an empty string, str will be returned.
//
// Samples:
-// LeftJustify("hello", 4, " ") => "hello"
-// LeftJustify("hello", 10, " ") => "hello "
-// LeftJustify("hello", 10, "123") => "hello12312"
+//
+// LeftJustify("hello", 4, " ") => "hello"
+// LeftJustify("hello", 10, " ") => "hello "
+// LeftJustify("hello", 10, "123") => "hello12312"
func LeftJustify(str string, length int, pad string) string {
l := Len(str)
@@ -100,9 +102,10 @@ func LeftJustify(str string, length int, pad string) string {
// If pad is an empty string, str will be returned.
//
// Samples:
-// RightJustify("hello", 4, " ") => "hello"
-// RightJustify("hello", 10, " ") => " hello"
-// RightJustify("hello", 10, "123") => "12312hello"
+//
+// RightJustify("hello", 4, " ") => "hello"
+// RightJustify("hello", 10, " ") => " hello"
+// RightJustify("hello", 10, "123") => "12312hello"
func RightJustify(str string, length int, pad string) string {
l := Len(str)
@@ -126,9 +129,10 @@ func RightJustify(str string, length int, pad string) string {
// If pad is an empty string, str will be returned.
//
// Samples:
-// Center("hello", 4, " ") => "hello"
-// Center("hello", 10, " ") => " hello "
-// Center("hello", 10, "123") => "12hello123"
+//
+// Center("hello", 4, " ") => "hello"
+// Center("hello", 10, " ") => " hello "
+// Center("hello", 10, "123") => "12hello123"
func Center(str string, length int, pad string) string {
l := Len(str)
diff --git a/test/tools/vendor/github.com/huandu/xstrings/manipulate.go b/test/tools/vendor/github.com/huandu/xstrings/manipulate.go
index 64075f9bb8a..ab42fe0fec6 100644
--- a/test/tools/vendor/github.com/huandu/xstrings/manipulate.go
+++ b/test/tools/vendor/github.com/huandu/xstrings/manipulate.go
@@ -79,10 +79,12 @@ func Slice(str string, start, end int) string {
// The return value is a slice of strings with head, match and tail.
//
// If str contains sep, for example "hello" and "l", Partition returns
-// "he", "l", "lo"
+//
+// "he", "l", "lo"
//
// If str doesn't contain sep, for example "hello" and "x", Partition returns
-// "hello", "", ""
+//
+// "hello", "", ""
func Partition(str, sep string) (head, match, tail string) {
index := strings.Index(str, sep)
@@ -101,10 +103,12 @@ func Partition(str, sep string) (head, match, tail string) {
// The return value is a slice of strings with head, match and tail.
//
// If str contains sep, for example "hello" and "l", LastPartition returns
-// "hel", "l", "o"
+//
+// "hel", "l", "o"
//
// If str doesn't contain sep, for example "hello" and "x", LastPartition returns
-// "", "", "hello"
+//
+// "", "", "hello"
func LastPartition(str, sep string) (head, match, tail string) {
index := strings.LastIndex(str, sep)
diff --git a/test/tools/vendor/github.com/huandu/xstrings/stringbuilder.go b/test/tools/vendor/github.com/huandu/xstrings/stringbuilder.go
index bb0919d32f7..06812fea07d 100644
--- a/test/tools/vendor/github.com/huandu/xstrings/stringbuilder.go
+++ b/test/tools/vendor/github.com/huandu/xstrings/stringbuilder.go
@@ -1,4 +1,5 @@
-//+build go1.10
+//go:build go1.10
+// +build go1.10
package xstrings
diff --git a/test/tools/vendor/github.com/huandu/xstrings/stringbuilder_go110.go b/test/tools/vendor/github.com/huandu/xstrings/stringbuilder_go110.go
index dac389d139e..ccaa5aedd33 100644
--- a/test/tools/vendor/github.com/huandu/xstrings/stringbuilder_go110.go
+++ b/test/tools/vendor/github.com/huandu/xstrings/stringbuilder_go110.go
@@ -1,4 +1,5 @@
-//+build !go1.10
+//go:build !go1.10
+// +build !go1.10
package xstrings
diff --git a/test/tools/vendor/github.com/huandu/xstrings/translate.go b/test/tools/vendor/github.com/huandu/xstrings/translate.go
index 42e694fb176..1fac6a00be3 100644
--- a/test/tools/vendor/github.com/huandu/xstrings/translate.go
+++ b/test/tools/vendor/github.com/huandu/xstrings/translate.go
@@ -416,14 +416,16 @@ func (tr *Translator) HasPattern() bool {
//
// From and to are patterns representing a set of characters. Pattern is defined as following.
//
-// * Special characters
-// * '-' means a range of runes, e.g.
-// * "a-z" means all characters from 'a' to 'z' inclusive;
-// * "z-a" means all characters from 'z' to 'a' inclusive.
-// * '^' as first character means a set of all runes excepted listed, e.g.
-// * "^a-z" means all characters except 'a' to 'z' inclusive.
-// * '\' escapes special characters.
-// * Normal character represents itself, e.g. "abc" is a set including 'a', 'b' and 'c'.
+// Special characters:
+//
+// 1. '-' means a range of runes, e.g.
+// "a-z" means all characters from 'a' to 'z' inclusive;
+// "z-a" means all characters from 'z' to 'a' inclusive.
+// 2. '^' as first character means a set of all runes excepted listed, e.g.
+// "^a-z" means all characters except 'a' to 'z' inclusive.
+// 3. '\' escapes special characters.
+//
+// Normal character represents itself, e.g. "abc" is a set including 'a', 'b' and 'c'.
//
// Translate will try to find a 1:1 mapping from from to to.
// If to is smaller than from, last rune in to will be used to map "out of range" characters in from.
@@ -433,12 +435,13 @@ func (tr *Translator) HasPattern() bool {
// If the to pattern is an empty string, Translate works exactly the same as Delete.
//
// Samples:
-// Translate("hello", "aeiou", "12345") => "h2ll4"
-// Translate("hello", "a-z", "A-Z") => "HELLO"
-// Translate("hello", "z-a", "a-z") => "svool"
-// Translate("hello", "aeiou", "*") => "h*ll*"
-// Translate("hello", "^l", "*") => "**ll*"
-// Translate("hello ^ world", `\^lo`, "*") => "he*** * w*r*d"
+//
+// Translate("hello", "aeiou", "12345") => "h2ll4"
+// Translate("hello", "a-z", "A-Z") => "HELLO"
+// Translate("hello", "z-a", "a-z") => "svool"
+// Translate("hello", "aeiou", "*") => "h*ll*"
+// Translate("hello", "^l", "*") => "**ll*"
+// Translate("hello ^ world", `\^lo`, "*") => "he*** * w*r*d"
func Translate(str, from, to string) string {
tr := NewTranslator(from, to)
return tr.Translate(str)
@@ -448,9 +451,10 @@ func Translate(str, from, to string) string {
// Pattern is defined in Translate function.
//
// Samples:
-// Delete("hello", "aeiou") => "hll"
-// Delete("hello", "a-k") => "llo"
-// Delete("hello", "^a-k") => "he"
+//
+// Delete("hello", "aeiou") => "hll"
+// Delete("hello", "a-k") => "llo"
+// Delete("hello", "^a-k") => "he"
func Delete(str, pattern string) string {
tr := NewTranslator(pattern, "")
return tr.Translate(str)
@@ -460,9 +464,10 @@ func Delete(str, pattern string) string {
// Pattern is defined in Translate function.
//
// Samples:
-// Count("hello", "aeiou") => 3
-// Count("hello", "a-k") => 3
-// Count("hello", "^a-k") => 2
+//
+// Count("hello", "aeiou") => 3
+// Count("hello", "a-k") => 3
+// Count("hello", "^a-k") => 2
func Count(str, pattern string) int {
if pattern == "" || str == "" {
return 0
@@ -491,9 +496,10 @@ func Count(str, pattern string) int {
// If pattern is not empty, only runes matching the pattern will be squeezed.
//
// Samples:
-// Squeeze("hello", "") => "helo"
-// Squeeze("hello", "m-z") => "hello"
-// Squeeze("hello world", " ") => "hello world"
+//
+// Squeeze("hello", "") => "helo"
+// Squeeze("hello", "m-z") => "hello"
+// Squeeze("hello world", " ") => "hello world"
func Squeeze(str, pattern string) string {
var last, r rune
var size int
diff --git a/test/tools/vendor/github.com/imdario/mergo/CONTRIBUTING.md b/test/tools/vendor/github.com/imdario/mergo/CONTRIBUTING.md
new file mode 100644
index 00000000000..0a1ff9f94d8
--- /dev/null
+++ b/test/tools/vendor/github.com/imdario/mergo/CONTRIBUTING.md
@@ -0,0 +1,112 @@
+
+# Contributing to mergo
+
+First off, thanks for taking the time to contribute! ❤️
+
+All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉
+
+> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about:
+> - Star the project
+> - Tweet about it
+> - Refer this project in your project's readme
+> - Mention the project at local meetups and tell your friends/colleagues
+
+
+## Table of Contents
+
+- [Code of Conduct](#code-of-conduct)
+- [I Have a Question](#i-have-a-question)
+- [I Want To Contribute](#i-want-to-contribute)
+- [Reporting Bugs](#reporting-bugs)
+- [Suggesting Enhancements](#suggesting-enhancements)
+
+## Code of Conduct
+
+This project and everyone participating in it is governed by the
+[mergo Code of Conduct](https://github.com/imdario/mergo/blob/master/CODE_OF_CONDUCT.md).
+By participating, you are expected to uphold this code. Please report unacceptable behavior
+to the project maintainers.
+
+
+## I Have a Question
+
+> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo).
+
+Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first.
+
+If you then still feel the need to ask a question and need clarification, we recommend the following:
+
+- Open an [Issue](https://github.com/imdario/mergo/issues/new).
+- Provide as much context as you can about what you're running into.
+- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant.
+
+We will then take care of the issue as soon as possible.
+
+## I Want To Contribute
+
+> ### Legal Notice
+> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license.
+
+### Reporting Bugs
+
+
+#### Before Submitting a Bug Report
+
+A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible.
+
+- Make sure that you are using the latest version.
+- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](https://pkg.go.dev/github.com/imdario/mergo). If you are looking for support, you might want to check [this section](#i-have-a-question)).
+- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/imdario/mergo/issues?q=label%3Abug).
+- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue.
+- Collect information about the bug:
+- Stack trace (Traceback)
+- OS, Platform and Version (Windows, Linux, macOS, x86, ARM)
+- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant.
+- Possibly your input and the output
+- Can you reliably reproduce the issue? And can you also reproduce it with older versions?
+
+
+#### How Do I Submit a Good Bug Report?
+
+> You must never report security-related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead, sensitive bugs must be reported privately by email to the maintainers.
+
+
+We use GitHub issues to track bugs and errors. If you run into an issue with the project:
+
+- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.)
+- Explain the behavior you would expect and the actual behavior.
+- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case.
+- Provide the information you collected in the previous section.
+
+Once it's filed:
+
+- The project team will label the issue accordingly.
+- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced.
+- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone.
+
+### Suggesting Enhancements
+
+This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions.
+
+
+#### Before Submitting an Enhancement
+
+- Make sure that you are using the latest version.
+- Read the [documentation](https://pkg.go.dev/github.com/imdario/mergo) carefully and find out if the functionality is already covered, maybe by an individual configuration.
+- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one.
+- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library.
+
+
+#### How Do I Submit a Good Enhancement Suggestion?
+
+Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues).
+
+- Use a **clear and descriptive title** for the issue to identify the suggestion.
+- Provide a **step-by-step description of the suggested enhancement** in as many details as possible.
+- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you.
+- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux.
+- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration.
+
+
+## Attribution
+This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)!
diff --git a/test/tools/vendor/github.com/imdario/mergo/README.md b/test/tools/vendor/github.com/imdario/mergo/README.md
index aa8cbd7ce6d..ffbbb62c704 100644
--- a/test/tools/vendor/github.com/imdario/mergo/README.md
+++ b/test/tools/vendor/github.com/imdario/mergo/README.md
@@ -1,18 +1,20 @@
# Mergo
-
-[![GoDoc][3]][4]
[![GitHub release][5]][6]
[![GoCard][7]][8]
-[![Build Status][1]][2]
-[![Coverage Status][9]][10]
+[![Test status][1]][2]
+[![OpenSSF Scorecard][21]][22]
+[![OpenSSF Best Practices][19]][20]
+[![Coverage status][9]][10]
[![Sourcegraph][11]][12]
-[![FOSSA Status][13]][14]
+[![FOSSA status][13]][14]
-[![GoCenter Kudos][15]][16]
+[![GoDoc][3]][4]
+[![Become my sponsor][15]][16]
+[![Tidelift][17]][18]
-[1]: https://travis-ci.org/imdario/mergo.png
-[2]: https://travis-ci.org/imdario/mergo
+[1]: https://github.com/imdario/mergo/workflows/tests/badge.svg?branch=master
+[2]: https://github.com/imdario/mergo/actions/workflows/tests.yml
[3]: https://godoc.org/github.com/imdario/mergo?status.svg
[4]: https://godoc.org/github.com/imdario/mergo
[5]: https://img.shields.io/github/release/imdario/mergo.svg
@@ -25,8 +27,14 @@
[12]: https://sourcegraph.com/github.com/imdario/mergo?badge
[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield
[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
-[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo
-[16]: https://search.gocenter.io/github.com/imdario/mergo
+[15]: https://img.shields.io/github/sponsors/imdario
+[16]: https://github.com/sponsors/imdario
+[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo
+[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo
+[19]: https://bestpractices.coreinfrastructure.org/projects/7177/badge
+[20]: https://bestpractices.coreinfrastructure.org/projects/7177
+[21]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo/badge
+[22]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo
A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
@@ -36,11 +44,11 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the
## Status
-It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
+It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
### Important note
-Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds suppot for go modules.
+Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules.
Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code.
@@ -51,9 +59,8 @@ If you were using Mergo before April 6th, 2015, please check your project works
If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
-[](https://beerpay.io/imdario/mergo)
-[](https://beerpay.io/imdario/mergo)
+
### Mergo in the wild
@@ -98,6 +105,8 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont
- [jnuthong/item_search](https://github.com/jnuthong/item_search)
- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
- [containerssh/containerssh](https://github.com/containerssh/containerssh)
+- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
+- [tjpnz/structbot](https://github.com/tjpnz/structbot)
## Install
@@ -168,7 +177,7 @@ func main() {
Note: if test are failing due missing package, please execute:
- go get gopkg.in/yaml.v2
+ go get gopkg.in/yaml.v3
### Transformers
@@ -218,7 +227,6 @@ func main() {
}
```
-
## Contact me
If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
@@ -227,21 +235,8 @@ If I can help you, you have an idea or you are using Mergo in your projects, don
Written by [Dario Castañé](http://dario.im).
-## Top Contributors
-
-[](https://sourcerer.io/fame/imdario/imdario/mergo/links/0)
-[](https://sourcerer.io/fame/imdario/imdario/mergo/links/1)
-[](https://sourcerer.io/fame/imdario/imdario/mergo/links/2)
-[](https://sourcerer.io/fame/imdario/imdario/mergo/links/3)
-[](https://sourcerer.io/fame/imdario/imdario/mergo/links/4)
-[](https://sourcerer.io/fame/imdario/imdario/mergo/links/5)
-[](https://sourcerer.io/fame/imdario/imdario/mergo/links/6)
-[](https://sourcerer.io/fame/imdario/imdario/mergo/links/7)
-
-
## License
[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
-
[](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large)
diff --git a/test/tools/vendor/github.com/imdario/mergo/SECURITY.md b/test/tools/vendor/github.com/imdario/mergo/SECURITY.md
new file mode 100644
index 00000000000..a5de61f77ba
--- /dev/null
+++ b/test/tools/vendor/github.com/imdario/mergo/SECURITY.md
@@ -0,0 +1,14 @@
+# Security Policy
+
+## Supported Versions
+
+| Version | Supported |
+| ------- | ------------------ |
+| 0.3.x | :white_check_mark: |
+| < 0.3 | :x: |
+
+## Security contact information
+
+To report a security vulnerability, please use the
+[Tidelift security contact](https://tidelift.com/security).
+Tidelift will coordinate the fix and disclosure.
diff --git a/test/tools/vendor/github.com/imdario/mergo/map.go b/test/tools/vendor/github.com/imdario/mergo/map.go
index a13a7ee46c7..b50d5c2a4e7 100644
--- a/test/tools/vendor/github.com/imdario/mergo/map.go
+++ b/test/tools/vendor/github.com/imdario/mergo/map.go
@@ -44,7 +44,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
}
}
// Remember, remember...
- visited[h] = &visit{addr, typ, seen}
+ visited[h] = &visit{typ, seen, addr}
}
zeroValue := reflect.Value{}
switch dst.Kind() {
@@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
}
fieldName := field.Name
fieldName = changeInitialCase(fieldName, unicode.ToLower)
- if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) {
+ if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) {
dstMap[fieldName] = src.Field(i).Interface()
}
}
@@ -142,7 +142,7 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
func _map(dst, src interface{}, opts ...func(*Config)) error {
if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
- return ErrNonPointerAgument
+ return ErrNonPointerArgument
}
var (
vDst, vSrc reflect.Value
diff --git a/test/tools/vendor/github.com/imdario/mergo/merge.go b/test/tools/vendor/github.com/imdario/mergo/merge.go
index 8c2a8fcd901..0ef9b2138c1 100644
--- a/test/tools/vendor/github.com/imdario/mergo/merge.go
+++ b/test/tools/vendor/github.com/imdario/mergo/merge.go
@@ -38,10 +38,11 @@ func isExportedComponent(field *reflect.StructField) bool {
}
type Config struct {
+ Transformers Transformers
Overwrite bool
+ ShouldNotDereference bool
AppendSlice bool
TypeCheck bool
- Transformers Transformers
overwriteWithEmptyValue bool
overwriteSliceWithEmptyValue bool
sliceDeepCopy bool
@@ -76,10 +77,10 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
}
}
// Remember, remember...
- visited[h] = &visit{addr, typ, seen}
+ visited[h] = &visit{typ, seen, addr}
}
- if config.Transformers != nil && !isEmptyValue(dst) {
+ if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() {
if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
err = fn(dst, src)
return
@@ -95,7 +96,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
}
}
} else {
- if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) {
+ if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) {
dst.Set(src)
}
}
@@ -110,7 +111,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
}
if src.Kind() != reflect.Map {
- if overwrite {
+ if overwrite && dst.CanSet() {
dst.Set(src)
}
return
@@ -162,7 +163,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
dstSlice = reflect.ValueOf(dstElement.Interface())
}
- if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy {
+ if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy {
if typeCheck && srcSlice.Type() != dstSlice.Type() {
return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
}
@@ -194,22 +195,38 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
dst.SetMapIndex(key, dstSlice)
}
}
- if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) {
- continue
+
+ if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) {
+ if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice {
+ continue
+ }
+ if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map {
+ continue
+ }
}
- if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) {
+ if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) {
if dst.IsNil() {
dst.Set(reflect.MakeMap(dst.Type()))
}
dst.SetMapIndex(key, srcElement)
}
}
+
+ // Ensure that all keys in dst are deleted if they are not in src.
+ if overwriteWithEmptySrc {
+ for _, key := range dst.MapKeys() {
+ srcElement := src.MapIndex(key)
+ if !srcElement.IsValid() {
+ dst.SetMapIndex(key, reflect.Value{})
+ }
+ }
+ }
case reflect.Slice:
if !dst.CanSet() {
break
}
- if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy {
+ if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy {
dst.Set(src)
} else if config.AppendSlice {
if src.Type() != dst.Type() {
@@ -244,12 +261,18 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
if src.Kind() != reflect.Interface {
if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) {
- if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+ if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) {
dst.Set(src)
}
} else if src.Kind() == reflect.Ptr {
- if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
- return
+ if !config.ShouldNotDereference {
+ if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+ return
+ }
+ } else {
+ if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() {
+ dst.Set(src)
+ }
}
} else if dst.Elem().Type() == src.Type() {
if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
@@ -262,7 +285,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
}
if dst.IsNil() || overwrite {
- if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+ if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) {
dst.Set(src)
}
break
@@ -275,7 +298,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
break
}
default:
- mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc)
+ mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc)
if mustSet {
if dst.CanSet() {
dst.Set(src)
@@ -326,6 +349,12 @@ func WithOverrideEmptySlice(config *Config) {
config.overwriteSliceWithEmptyValue = true
}
+// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty
+// (i.e. a non-nil pointer is never considered empty).
+func WithoutDereference(config *Config) {
+ config.ShouldNotDereference = true
+}
+
// WithAppendSlice will make merge append slices instead of overwriting it.
func WithAppendSlice(config *Config) {
config.AppendSlice = true
@@ -344,7 +373,7 @@ func WithSliceDeepCopy(config *Config) {
func merge(dst, src interface{}, opts ...func(*Config)) error {
if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
- return ErrNonPointerAgument
+ return ErrNonPointerArgument
}
var (
vDst, vSrc reflect.Value
diff --git a/test/tools/vendor/github.com/imdario/mergo/mergo.go b/test/tools/vendor/github.com/imdario/mergo/mergo.go
index 3cc926c7f62..0a721e2d858 100644
--- a/test/tools/vendor/github.com/imdario/mergo/mergo.go
+++ b/test/tools/vendor/github.com/imdario/mergo/mergo.go
@@ -17,10 +17,10 @@ import (
var (
ErrNilArguments = errors.New("src and dst must not be nil")
ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type")
- ErrNotSupported = errors.New("only structs and maps are supported")
+ ErrNotSupported = errors.New("only structs, maps, and slices are supported")
ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
- ErrNonPointerAgument = errors.New("dst must be a pointer")
+ ErrNonPointerArgument = errors.New("dst must be a pointer")
)
// During deepMerge, must keep track of checks that are
@@ -28,13 +28,13 @@ var (
// checks in progress are true when it reencounters them.
// Visited are stored in a map indexed by 17 * a1 + a2;
type visit struct {
- ptr uintptr
typ reflect.Type
next *visit
+ ptr uintptr
}
// From src/pkg/encoding/json/encode.go.
-func isEmptyValue(v reflect.Value) bool {
+func isEmptyValue(v reflect.Value, shouldDereference bool) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
@@ -50,7 +50,10 @@ func isEmptyValue(v reflect.Value) bool {
if v.IsNil() {
return true
}
- return isEmptyValue(v.Elem())
+ if shouldDereference {
+ return isEmptyValue(v.Elem(), shouldDereference)
+ }
+ return false
case reflect.Func:
return v.IsNil()
case reflect.Invalid:
@@ -65,7 +68,7 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
return
}
vDst = reflect.ValueOf(dst).Elem()
- if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
+ if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice {
err = ErrNotSupported
return
}
diff --git a/test/tools/vendor/github.com/pelletier/go-toml/v2/.gitignore b/test/tools/vendor/github.com/pelletier/go-toml/v2/.gitignore
index a69e2b0ebd7..4b7c4eda3a9 100644
--- a/test/tools/vendor/github.com/pelletier/go-toml/v2/.gitignore
+++ b/test/tools/vendor/github.com/pelletier/go-toml/v2/.gitignore
@@ -3,4 +3,5 @@ fuzz/
cmd/tomll/tomll
cmd/tomljson/tomljson
cmd/tomltestgen/tomltestgen
-dist
\ No newline at end of file
+dist
+tests/
diff --git a/test/tools/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml b/test/tools/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml
index 3aa1840ec49..1d8b69e65e0 100644
--- a/test/tools/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml
+++ b/test/tools/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml
@@ -18,6 +18,7 @@ builds:
- linux_amd64
- linux_arm64
- linux_arm
+ - linux_riscv64
- windows_amd64
- windows_arm64
- windows_arm
@@ -37,6 +38,7 @@ builds:
- linux_amd64
- linux_arm64
- linux_arm
+ - linux_riscv64
- windows_amd64
- windows_arm64
- windows_arm
@@ -55,6 +57,7 @@ builds:
targets:
- linux_amd64
- linux_arm64
+ - linux_riscv64
- linux_arm
- windows_amd64
- windows_arm64
diff --git a/test/tools/vendor/github.com/pelletier/go-toml/v2/LICENSE b/test/tools/vendor/github.com/pelletier/go-toml/v2/LICENSE
index 6839d51cd43..991e2ae966e 100644
--- a/test/tools/vendor/github.com/pelletier/go-toml/v2/LICENSE
+++ b/test/tools/vendor/github.com/pelletier/go-toml/v2/LICENSE
@@ -1,6 +1,7 @@
The MIT License (MIT)
-Copyright (c) 2013 - 2022 Thomas Pelletier, Eric Anderton
+go-toml v2
+Copyright (c) 2021 - 2023 Thomas Pelletier
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/test/tools/vendor/github.com/pelletier/go-toml/v2/README.md b/test/tools/vendor/github.com/pelletier/go-toml/v2/README.md
index d53f4397145..b10f97f0bdd 100644
--- a/test/tools/vendor/github.com/pelletier/go-toml/v2/README.md
+++ b/test/tools/vendor/github.com/pelletier/go-toml/v2/README.md
@@ -45,16 +45,15 @@ to check for typos. [See example in the documentation][strict].
### Contextualized errors
-When most decoding errors occur, go-toml returns [`DecodeError`][decode-err]),
+When most decoding errors occur, go-toml returns [`DecodeError`][decode-err],
which contains a human readable contextualized version of the error. For
example:
```
-2| key1 = "value1"
-3| key2 = "missing2"
- | ~~~~ missing field
-4| key3 = "missing3"
-5| key4 = "value4"
+1| [server]
+2| path = 100
+ | ~~~ cannot decode TOML integer into struct field toml_test.Server.Path of type string
+3| port = 50
```
[decode-err]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#DecodeError
@@ -73,6 +72,26 @@ representation.
[tlt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalTime
[tldt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDateTime
+### Commented config
+
+Since TOML is often used for configuration files, go-toml can emit documents
+annotated with [comments and commented-out values][comments-example]. For
+example, it can generate the following file:
+
+```toml
+# Host IP to connect to.
+host = '127.0.0.1'
+# Port of the remote server.
+port = 4242
+
+# Encryption parameters (optional)
+# [TLS]
+# cipher = 'AEAD-AES128-GCM-SHA256'
+# version = 'TLS 1.3'
+```
+
+[comments-example]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#example-Marshal-Commented
+
## Getting started
Given the following struct, let's see how to read it and write it as TOML:
@@ -160,12 +179,12 @@ Execution time speedup compared to other Go TOML libraries:
Benchmark
go-toml v1
BurntSushi/toml
-
Marshal/HugoFrontMatter-2
1.9x
1.9x
-
Marshal/ReferenceFile/map-2
1.7x
1.8x
-
Marshal/ReferenceFile/struct-2
2.2x
2.5x
-
Unmarshal/HugoFrontMatter-2
2.9x
2.9x
-
Unmarshal/ReferenceFile/map-2
2.6x
2.9x
-
Unmarshal/ReferenceFile/struct-2
4.4x
5.3x
+
Marshal/HugoFrontMatter-2
1.9x
2.2x
+
Marshal/ReferenceFile/map-2
1.7x
2.1x
+
Marshal/ReferenceFile/struct-2
2.2x
3.0x
+
Unmarshal/HugoFrontMatter-2
2.9x
2.7x
+
Unmarshal/ReferenceFile/map-2
2.6x
2.7x
+
Unmarshal/ReferenceFile/struct-2
4.6x
5.1x
See more
@@ -178,17 +197,17 @@ provided for completeness.
Benchmark
go-toml v1
BurntSushi/toml
-
Marshal/SimpleDocument/map-2
1.8x
2.9x
-
Marshal/SimpleDocument/struct-2
2.7x
4.2x
-
Unmarshal/SimpleDocument/map-2
4.5x
3.1x
-
Unmarshal/SimpleDocument/struct-2
6.2x
3.9x
-
UnmarshalDataset/example-2
3.1x
3.5x
-
UnmarshalDataset/code-2
2.3x
3.1x
-
UnmarshalDataset/twitter-2
2.5x
2.6x
-
UnmarshalDataset/citm_catalog-2
2.1x
2.2x
-
UnmarshalDataset/canada-2
1.6x
1.3x
-
UnmarshalDataset/config-2
4.3x
3.2x
-
[Geo mean]
2.7x
2.8x
+
Marshal/SimpleDocument/map-2
1.8x
2.7x
+
Marshal/SimpleDocument/struct-2
2.7x
3.8x
+
Unmarshal/SimpleDocument/map-2
3.8x
3.0x
+
Unmarshal/SimpleDocument/struct-2
5.6x
4.1x
+
UnmarshalDataset/example-2
3.0x
3.2x
+
UnmarshalDataset/code-2
2.3x
2.9x
+
UnmarshalDataset/twitter-2
2.6x
2.7x
+
UnmarshalDataset/citm_catalog-2
2.2x
2.3x
+
UnmarshalDataset/canada-2
1.8x
1.5x
+
UnmarshalDataset/config-2
4.1x
2.9x
+
geomean
2.7x
2.8x
This table can be generated with ./ci.sh benchmark -a -html.
@@ -497,27 +516,20 @@ is not necessary anymore.
V1 used to provide multiple struct tags: `comment`, `commented`, `multiline`,
`toml`, and `omitempty`. To behave more like the standard library, v2 has merged
-`toml`, `multiline`, and `omitempty`. For example:
+`toml`, `multiline`, `commented`, and `omitempty`. For example:
```go
type doc struct {
// v1
- F string `toml:"field" multiline:"true" omitempty:"true"`
+ F string `toml:"field" multiline:"true" omitempty:"true" commented:"true"`
// v2
- F string `toml:"field,multiline,omitempty"`
+ F string `toml:"field,multiline,omitempty,commented"`
}
```
Has a result, the `Encoder.SetTag*` methods have been removed, as there is just
one tag now.
-
-#### `commented` tag has been removed
-
-There is no replacement for the `commented` tag. This feature would be better
-suited in a proper document model for go-toml v2, which has been [cut from
-scope][nodoc] at the moment.
-
#### `Encoder.ArraysWithOneElementPerLine` has been renamed
The new name is `Encoder.SetArraysMultiline`. The behavior should be the same.
diff --git a/test/tools/vendor/github.com/pelletier/go-toml/v2/SECURITY.md b/test/tools/vendor/github.com/pelletier/go-toml/v2/SECURITY.md
index b2f21cfc92c..d4d554fda9d 100644
--- a/test/tools/vendor/github.com/pelletier/go-toml/v2/SECURITY.md
+++ b/test/tools/vendor/github.com/pelletier/go-toml/v2/SECURITY.md
@@ -2,9 +2,6 @@
## Supported Versions
-Use this section to tell people about which versions of your project are
-currently being supported with security updates.
-
| Version | Supported |
| ---------- | ------------------ |
| Latest 2.x | :white_check_mark: |
diff --git a/test/tools/vendor/github.com/pelletier/go-toml/v2/ci.sh b/test/tools/vendor/github.com/pelletier/go-toml/v2/ci.sh
index 05c76f29757..86217a9b097 100644
--- a/test/tools/vendor/github.com/pelletier/go-toml/v2/ci.sh
+++ b/test/tools/vendor/github.com/pelletier/go-toml/v2/ci.sh
@@ -77,8 +77,9 @@ cover() {
pushd "$dir"
go test -covermode=atomic -coverpkg=./... -coverprofile=coverage.out.tmp ./...
- cat coverage.out.tmp | grep -v fuzz | grep -v testsuite | grep -v tomltestgen | grep -v gotoml-test-decoder > coverage.out
+ grep -Ev '(fuzz|testsuite|tomltestgen|gotoml-test-decoder|gotoml-test-encoder)' coverage.out.tmp > coverage.out
go tool cover -func=coverage.out
+ echo "Coverage profile for ${branch}: ${dir}/coverage.out" >&2
popd
if [ "${branch}" != "HEAD" ]; then
@@ -151,7 +152,7 @@ bench() {
fi
export GOMAXPROCS=2
- nice -n -19 taskset --cpu-list 0,1 go test '-bench=^Benchmark(Un)?[mM]arshal' -count=5 -run=Nothing ./... | tee "${out}"
+ go test '-bench=^Benchmark(Un)?[mM]arshal' -count=10 -run=Nothing ./... | tee "${out}"
popd
if [ "${branch}" != "HEAD" ]; then
@@ -160,10 +161,12 @@ bench() {
}
fmktemp() {
- if mktemp --version|grep GNU >/dev/null; then
- mktemp --suffix=-$1;
+ if mktemp --version &> /dev/null; then
+ # GNU
+ mktemp --suffix=-$1
else
- mktemp -t $1;
+ # BSD
+ mktemp -t $1
fi
}
@@ -183,12 +186,14 @@ with open(sys.argv[1]) as f:
lines.append(line.split(','))
results = []
-for line in reversed(lines[1:]):
+for line in reversed(lines[2:]):
+ if len(line) < 8 or line[0] == "":
+ continue
v2 = float(line[1])
results.append([
line[0].replace("-32", ""),
"%.1fx" % (float(line[3])/v2), # v1
- "%.1fx" % (float(line[5])/v2), # bs
+ "%.1fx" % (float(line[7])/v2), # bs
])
# move geomean to the end
results.append(results[0])
@@ -259,10 +264,10 @@ benchmark() {
if [ "$1" = "-html" ]; then
tmpcsv=`fmktemp csv`
- benchstat -csv -geomean go-toml-v2.txt go-toml-v1.txt bs-toml.txt > $tmpcsv
+ benchstat -format csv go-toml-v2.txt go-toml-v1.txt bs-toml.txt > $tmpcsv
benchstathtml $tmpcsv
else
- benchstat -geomean go-toml-v2.txt go-toml-v1.txt bs-toml.txt
+ benchstat go-toml-v2.txt go-toml-v1.txt bs-toml.txt
fi
rm -f go-toml-v2.txt go-toml-v1.txt bs-toml.txt
diff --git a/test/tools/vendor/github.com/pelletier/go-toml/v2/decode.go b/test/tools/vendor/github.com/pelletier/go-toml/v2/decode.go
index 3a860d0f6ab..f0ec3b17054 100644
--- a/test/tools/vendor/github.com/pelletier/go-toml/v2/decode.go
+++ b/test/tools/vendor/github.com/pelletier/go-toml/v2/decode.go
@@ -318,7 +318,7 @@ func parseFloat(b []byte) (float64, error) {
if cleaned[0] == '+' || cleaned[0] == '-' {
start = 1
}
- if cleaned[start] == '0' && isDigit(cleaned[start+1]) {
+ if cleaned[start] == '0' && len(cleaned) > start+1 && isDigit(cleaned[start+1]) {
return 0, unstable.NewParserError(b, "float integer part cannot have leading zeroes")
}
diff --git a/test/tools/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/test/tools/vendor/github.com/pelletier/go-toml/v2/marshaler.go
index 6ab1d823842..6fe78533c1c 100644
--- a/test/tools/vendor/github.com/pelletier/go-toml/v2/marshaler.go
+++ b/test/tools/vendor/github.com/pelletier/go-toml/v2/marshaler.go
@@ -148,6 +148,9 @@ func (enc *Encoder) SetIndentTables(indent bool) *Encoder {
//
// The "omitempty" option prevents empty values or groups from being emitted.
//
+// The "commented" option prefixes the value and all its children with a comment
+// symbol.
+//
// In addition to the "toml" tag struct tag, a "comment" tag can be used to emit
// a TOML comment before the value being annotated. Comments are ignored inside
// inline tables. For array tables, the comment is only present before the first
@@ -180,6 +183,7 @@ func (enc *Encoder) Encode(v interface{}) error {
type valueOptions struct {
multiline bool
omitempty bool
+ commented bool
comment string
}
@@ -205,6 +209,9 @@ type encoderCtx struct {
// Indentation level
indent int
+ // Prefix the current value with a comment.
+ commented bool
+
// Options coming from struct tags
options valueOptions
}
@@ -273,7 +280,7 @@ func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, e
return enc.encodeMap(b, ctx, v)
case reflect.Struct:
return enc.encodeStruct(b, ctx, v)
- case reflect.Slice:
+ case reflect.Slice, reflect.Array:
return enc.encodeSlice(b, ctx, v)
case reflect.Interface:
if v.IsNil() {
@@ -357,6 +364,7 @@ func (enc *Encoder) encodeKv(b []byte, ctx encoderCtx, options valueOptions, v r
if !ctx.inline {
b = enc.encodeComment(ctx.indent, options.comment, b)
+ b = enc.commented(ctx.commented, b)
b = enc.indent(ctx.indent, b)
}
@@ -378,6 +386,13 @@ func (enc *Encoder) encodeKv(b []byte, ctx encoderCtx, options valueOptions, v r
return b, nil
}
+func (enc *Encoder) commented(commented bool, b []byte) []byte {
+ if commented {
+ return append(b, "# "...)
+ }
+ return b
+}
+
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Struct:
@@ -526,6 +541,8 @@ func (enc *Encoder) encodeTableHeader(ctx encoderCtx, b []byte) ([]byte, error)
b = enc.encodeComment(ctx.indent, ctx.options.comment, b)
+ b = enc.commented(ctx.commented, b)
+
b = enc.indent(ctx.indent, b)
b = append(b, '[')
@@ -704,6 +721,7 @@ func walkStruct(ctx encoderCtx, t *table, v reflect.Value) {
options := valueOptions{
multiline: opts.multiline,
omitempty: opts.omitempty,
+ commented: opts.commented,
comment: fieldType.Tag.Get("comment"),
}
@@ -763,6 +781,7 @@ type tagOptions struct {
multiline bool
inline bool
omitempty bool
+ commented bool
}
func parseTag(tag string) (string, tagOptions) {
@@ -790,6 +809,8 @@ func parseTag(tag string) (string, tagOptions) {
opts.inline = true
case "omitempty":
opts.omitempty = true
+ case "commented":
+ opts.commented = true
}
}
@@ -825,8 +846,10 @@ func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, erro
hasNonEmptyKV = true
ctx.setKey(kv.Key)
+ ctx2 := ctx
+ ctx2.commented = kv.Options.commented || ctx2.commented
- b, err = enc.encodeKv(b, ctx, kv.Options, kv.Value)
+ b, err = enc.encodeKv(b, ctx2, kv.Options, kv.Value)
if err != nil {
return nil, err
}
@@ -851,8 +874,10 @@ func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, erro
ctx.setKey(table.Key)
ctx.options = table.Options
+ ctx2 := ctx
+ ctx2.commented = ctx2.commented || ctx.options.commented
- b, err = enc.encode(b, ctx, table.Value)
+ b, err = enc.encode(b, ctx2, table.Value)
if err != nil {
return nil, err
}
@@ -930,7 +955,7 @@ func willConvertToTableOrArrayTable(ctx encoderCtx, v reflect.Value) bool {
return willConvertToTableOrArrayTable(ctx, v.Elem())
}
- if t.Kind() == reflect.Slice {
+ if t.Kind() == reflect.Slice || t.Kind() == reflect.Array {
if v.Len() == 0 {
// An empty slice should be a kv = [].
return false
@@ -970,6 +995,9 @@ func (enc *Encoder) encodeSliceAsArrayTable(b []byte, ctx encoderCtx, v reflect.
ctx.shiftKey()
scratch := make([]byte, 0, 64)
+
+ scratch = enc.commented(ctx.commented, scratch)
+
scratch = append(scratch, "[["...)
for i, k := range ctx.parentKey {
@@ -985,6 +1013,10 @@ func (enc *Encoder) encodeSliceAsArrayTable(b []byte, ctx encoderCtx, v reflect.
b = enc.encodeComment(ctx.indent, ctx.options.comment, b)
+ if enc.indentTables {
+ ctx.indent++
+ }
+
for i := 0; i < v.Len(); i++ {
if i != 0 {
b = append(b, "\n"...)
diff --git a/test/tools/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/test/tools/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go
index 393503431d4..c5e5f339017 100644
--- a/test/tools/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go
+++ b/test/tools/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go
@@ -149,12 +149,16 @@ type errorContext struct {
}
func (d *decoder) typeMismatchError(toml string, target reflect.Type) error {
+ return fmt.Errorf("toml: %s", d.typeMismatchString(toml, target))
+}
+
+func (d *decoder) typeMismatchString(toml string, target reflect.Type) string {
if d.errorContext != nil && d.errorContext.Struct != nil {
ctx := d.errorContext
f := ctx.Struct.FieldByIndex(ctx.Field)
- return fmt.Errorf("toml: cannot decode TOML %s into struct field %s.%s of type %s", toml, ctx.Struct, f.Name, f.Type)
+ return fmt.Sprintf("cannot decode TOML %s into struct field %s.%s of type %s", toml, ctx.Struct, f.Name, f.Type)
}
- return fmt.Errorf("toml: cannot decode TOML %s into a Go value of type %s", toml, target)
+ return fmt.Sprintf("cannot decode TOML %s into a Go value of type %s", toml, target)
}
func (d *decoder) expr() *unstable.Node {
@@ -963,7 +967,7 @@ func (d *decoder) unmarshalInteger(value *unstable.Node, v reflect.Value) error
case reflect.Interface:
r = reflect.ValueOf(i)
default:
- return d.typeMismatchError("integer", v.Type())
+ return unstable.NewParserError(d.p.Raw(value.Raw), d.typeMismatchString("integer", v.Type()))
}
if !r.Type().AssignableTo(v.Type()) {
@@ -982,7 +986,7 @@ func (d *decoder) unmarshalString(value *unstable.Node, v reflect.Value) error {
case reflect.Interface:
v.Set(reflect.ValueOf(string(value.Data)))
default:
- return unstable.NewParserError(d.p.Raw(value.Raw), "cannot store TOML string into a Go %s", v.Kind())
+ return unstable.NewParserError(d.p.Raw(value.Raw), d.typeMismatchString("string", v.Type()))
}
return nil
@@ -1093,9 +1097,9 @@ func (d *decoder) handleKeyValuePart(key unstable.Iterator, value *unstable.Node
f := fieldByIndex(v, path)
- if !f.CanSet() {
- // If the field is not settable, need to take a slower path and make a copy of
- // the struct itself to a new location.
+ if !f.CanAddr() {
+ // If the field is not addressable, need to take a slower path and
+ // make a copy of the struct itself to a new location.
nvp := reflect.New(v.Type())
nvp.Elem().Set(v)
v = nvp.Elem()
@@ -1170,10 +1174,10 @@ func initAndDereferencePointer(v reflect.Value) reflect.Value {
// Same as reflect.Value.FieldByIndex, but creates pointers if needed.
func fieldByIndex(v reflect.Value, path []int) reflect.Value {
- for i, x := range path {
+ for _, x := range path {
v = v.Field(x)
- if i < len(path)-1 && v.Kind() == reflect.Ptr {
+ if v.Kind() == reflect.Ptr {
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
diff --git a/test/tools/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go b/test/tools/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go
index a8eb05294ac..50358a44ffd 100644
--- a/test/tools/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go
+++ b/test/tools/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go
@@ -1013,6 +1013,7 @@ func (p *Parser) parseIntOrFloatOrDateTime(b []byte) (reference, []byte, error)
return p.builder.Push(Node{
Kind: Float,
Data: b[:3],
+ Raw: p.Range(b[:3]),
}), b[3:], nil
case 'n':
if !scanFollowsNan(b) {
@@ -1022,6 +1023,7 @@ func (p *Parser) parseIntOrFloatOrDateTime(b []byte) (reference, []byte, error)
return p.builder.Push(Node{
Kind: Float,
Data: b[:3],
+ Raw: p.Range(b[:3]),
}), b[3:], nil
case '+', '-':
return p.scanIntOrFloat(b)
@@ -1146,6 +1148,7 @@ func (p *Parser) scanIntOrFloat(b []byte) (reference, []byte, error) {
return p.builder.Push(Node{
Kind: Integer,
Data: b[:i],
+ Raw: p.Range(b[:i]),
}), b[i:], nil
}
@@ -1169,6 +1172,7 @@ func (p *Parser) scanIntOrFloat(b []byte) (reference, []byte, error) {
return p.builder.Push(Node{
Kind: Float,
Data: b[:i+3],
+ Raw: p.Range(b[:i+3]),
}), b[i+3:], nil
}
@@ -1180,6 +1184,7 @@ func (p *Parser) scanIntOrFloat(b []byte) (reference, []byte, error) {
return p.builder.Push(Node{
Kind: Float,
Data: b[:i+3],
+ Raw: p.Range(b[:i+3]),
}), b[i+3:], nil
}
@@ -1202,6 +1207,7 @@ func (p *Parser) scanIntOrFloat(b []byte) (reference, []byte, error) {
return p.builder.Push(Node{
Kind: kind,
Data: b[:i],
+ Raw: p.Range(b[:i]),
}), b[i:], nil
}
diff --git a/test/tools/vendor/github.com/pkg/errors/.gitignore b/test/tools/vendor/github.com/pkg/errors/.gitignore
deleted file mode 100644
index daf913b1b34..00000000000
--- a/test/tools/vendor/github.com/pkg/errors/.gitignore
+++ /dev/null
@@ -1,24 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
diff --git a/test/tools/vendor/github.com/pkg/errors/.travis.yml b/test/tools/vendor/github.com/pkg/errors/.travis.yml
deleted file mode 100644
index 9159de03e03..00000000000
--- a/test/tools/vendor/github.com/pkg/errors/.travis.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-language: go
-go_import_path: github.com/pkg/errors
-go:
- - 1.11.x
- - 1.12.x
- - 1.13.x
- - tip
-
-script:
- - make check
diff --git a/test/tools/vendor/github.com/pkg/errors/LICENSE b/test/tools/vendor/github.com/pkg/errors/LICENSE
deleted file mode 100644
index 835ba3e755c..00000000000
--- a/test/tools/vendor/github.com/pkg/errors/LICENSE
+++ /dev/null
@@ -1,23 +0,0 @@
-Copyright (c) 2015, Dave Cheney
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/test/tools/vendor/github.com/pkg/errors/Makefile b/test/tools/vendor/github.com/pkg/errors/Makefile
deleted file mode 100644
index ce9d7cded64..00000000000
--- a/test/tools/vendor/github.com/pkg/errors/Makefile
+++ /dev/null
@@ -1,44 +0,0 @@
-PKGS := github.com/pkg/errors
-SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS))
-GO := go
-
-check: test vet gofmt misspell unconvert staticcheck ineffassign unparam
-
-test:
- $(GO) test $(PKGS)
-
-vet: | test
- $(GO) vet $(PKGS)
-
-staticcheck:
- $(GO) get honnef.co/go/tools/cmd/staticcheck
- staticcheck -checks all $(PKGS)
-
-misspell:
- $(GO) get github.com/client9/misspell/cmd/misspell
- misspell \
- -locale GB \
- -error \
- *.md *.go
-
-unconvert:
- $(GO) get github.com/mdempsky/unconvert
- unconvert -v $(PKGS)
-
-ineffassign:
- $(GO) get github.com/gordonklaus/ineffassign
- find $(SRCDIRS) -name '*.go' | xargs ineffassign
-
-pedantic: check errcheck
-
-unparam:
- $(GO) get mvdan.cc/unparam
- unparam ./...
-
-errcheck:
- $(GO) get github.com/kisielk/errcheck
- errcheck $(PKGS)
-
-gofmt:
- @echo Checking code is gofmted
- @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)"
diff --git a/test/tools/vendor/github.com/pkg/errors/README.md b/test/tools/vendor/github.com/pkg/errors/README.md
deleted file mode 100644
index 54dfdcb12ea..00000000000
--- a/test/tools/vendor/github.com/pkg/errors/README.md
+++ /dev/null
@@ -1,59 +0,0 @@
-# errors [](https://travis-ci.org/pkg/errors) [](https://ci.appveyor.com/project/davecheney/errors/branch/master) [](http://godoc.org/github.com/pkg/errors) [](https://goreportcard.com/report/github.com/pkg/errors) [](https://sourcegraph.com/github.com/pkg/errors?badge)
-
-Package errors provides simple error handling primitives.
-
-`go get github.com/pkg/errors`
-
-The traditional error handling idiom in Go is roughly akin to
-```go
-if err != nil {
- return err
-}
-```
-which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
-
-## Adding context to an error
-
-The errors.Wrap function returns a new error that adds context to the original error. For example
-```go
-_, err := ioutil.ReadAll(r)
-if err != nil {
- return errors.Wrap(err, "read failed")
-}
-```
-## Retrieving the cause of an error
-
-Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
-```go
-type causer interface {
- Cause() error
-}
-```
-`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
-```go
-switch err := errors.Cause(err).(type) {
-case *MyError:
- // handle specifically
-default:
- // unknown error
-}
-```
-
-[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
-
-## Roadmap
-
-With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows:
-
-- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible)
-- 1.0. Final release.
-
-## Contributing
-
-Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports.
-
-Before sending a PR, please discuss your change by raising an issue.
-
-## License
-
-BSD-2-Clause
diff --git a/test/tools/vendor/github.com/pkg/errors/appveyor.yml b/test/tools/vendor/github.com/pkg/errors/appveyor.yml
deleted file mode 100644
index a932eade024..00000000000
--- a/test/tools/vendor/github.com/pkg/errors/appveyor.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-version: build-{build}.{branch}
-
-clone_folder: C:\gopath\src\github.com\pkg\errors
-shallow_clone: true # for startup speed
-
-environment:
- GOPATH: C:\gopath
-
-platform:
- - x64
-
-# http://www.appveyor.com/docs/installed-software
-install:
- # some helpful output for debugging builds
- - go version
- - go env
- # pre-installed MinGW at C:\MinGW is 32bit only
- # but MSYS2 at C:\msys64 has mingw64
- - set PATH=C:\msys64\mingw64\bin;%PATH%
- - gcc --version
- - g++ --version
-
-build_script:
- - go install -v ./...
-
-test_script:
- - set PATH=C:\gopath\bin;%PATH%
- - go test -v ./...
-
-#artifacts:
-# - path: '%GOPATH%\bin\*.exe'
-deploy: off
diff --git a/test/tools/vendor/github.com/pkg/errors/errors.go b/test/tools/vendor/github.com/pkg/errors/errors.go
deleted file mode 100644
index 161aea25829..00000000000
--- a/test/tools/vendor/github.com/pkg/errors/errors.go
+++ /dev/null
@@ -1,288 +0,0 @@
-// Package errors provides simple error handling primitives.
-//
-// The traditional error handling idiom in Go is roughly akin to
-//
-// if err != nil {
-// return err
-// }
-//
-// which when applied recursively up the call stack results in error reports
-// without context or debugging information. The errors package allows
-// programmers to add context to the failure path in their code in a way
-// that does not destroy the original value of the error.
-//
-// Adding context to an error
-//
-// The errors.Wrap function returns a new error that adds context to the
-// original error by recording a stack trace at the point Wrap is called,
-// together with the supplied message. For example
-//
-// _, err := ioutil.ReadAll(r)
-// if err != nil {
-// return errors.Wrap(err, "read failed")
-// }
-//
-// If additional control is required, the errors.WithStack and
-// errors.WithMessage functions destructure errors.Wrap into its component
-// operations: annotating an error with a stack trace and with a message,
-// respectively.
-//
-// Retrieving the cause of an error
-//
-// Using errors.Wrap constructs a stack of errors, adding context to the
-// preceding error. Depending on the nature of the error it may be necessary
-// to reverse the operation of errors.Wrap to retrieve the original error
-// for inspection. Any error value which implements this interface
-//
-// type causer interface {
-// Cause() error
-// }
-//
-// can be inspected by errors.Cause. errors.Cause will recursively retrieve
-// the topmost error that does not implement causer, which is assumed to be
-// the original cause. For example:
-//
-// switch err := errors.Cause(err).(type) {
-// case *MyError:
-// // handle specifically
-// default:
-// // unknown error
-// }
-//
-// Although the causer interface is not exported by this package, it is
-// considered a part of its stable public interface.
-//
-// Formatted printing of errors
-//
-// All error values returned from this package implement fmt.Formatter and can
-// be formatted by the fmt package. The following verbs are supported:
-//
-// %s print the error. If the error has a Cause it will be
-// printed recursively.
-// %v see %s
-// %+v extended format. Each Frame of the error's StackTrace will
-// be printed in detail.
-//
-// Retrieving the stack trace of an error or wrapper
-//
-// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
-// invoked. This information can be retrieved with the following interface:
-//
-// type stackTracer interface {
-// StackTrace() errors.StackTrace
-// }
-//
-// The returned errors.StackTrace type is defined as
-//
-// type StackTrace []Frame
-//
-// The Frame type represents a call site in the stack trace. Frame supports
-// the fmt.Formatter interface that can be used for printing information about
-// the stack trace of this error. For example:
-//
-// if err, ok := err.(stackTracer); ok {
-// for _, f := range err.StackTrace() {
-// fmt.Printf("%+s:%d\n", f, f)
-// }
-// }
-//
-// Although the stackTracer interface is not exported by this package, it is
-// considered a part of its stable public interface.
-//
-// See the documentation for Frame.Format for more details.
-package errors
-
-import (
- "fmt"
- "io"
-)
-
-// New returns an error with the supplied message.
-// New also records the stack trace at the point it was called.
-func New(message string) error {
- return &fundamental{
- msg: message,
- stack: callers(),
- }
-}
-
-// Errorf formats according to a format specifier and returns the string
-// as a value that satisfies error.
-// Errorf also records the stack trace at the point it was called.
-func Errorf(format string, args ...interface{}) error {
- return &fundamental{
- msg: fmt.Sprintf(format, args...),
- stack: callers(),
- }
-}
-
-// fundamental is an error that has a message and a stack, but no caller.
-type fundamental struct {
- msg string
- *stack
-}
-
-func (f *fundamental) Error() string { return f.msg }
-
-func (f *fundamental) Format(s fmt.State, verb rune) {
- switch verb {
- case 'v':
- if s.Flag('+') {
- io.WriteString(s, f.msg)
- f.stack.Format(s, verb)
- return
- }
- fallthrough
- case 's':
- io.WriteString(s, f.msg)
- case 'q':
- fmt.Fprintf(s, "%q", f.msg)
- }
-}
-
-// WithStack annotates err with a stack trace at the point WithStack was called.
-// If err is nil, WithStack returns nil.
-func WithStack(err error) error {
- if err == nil {
- return nil
- }
- return &withStack{
- err,
- callers(),
- }
-}
-
-type withStack struct {
- error
- *stack
-}
-
-func (w *withStack) Cause() error { return w.error }
-
-// Unwrap provides compatibility for Go 1.13 error chains.
-func (w *withStack) Unwrap() error { return w.error }
-
-func (w *withStack) Format(s fmt.State, verb rune) {
- switch verb {
- case 'v':
- if s.Flag('+') {
- fmt.Fprintf(s, "%+v", w.Cause())
- w.stack.Format(s, verb)
- return
- }
- fallthrough
- case 's':
- io.WriteString(s, w.Error())
- case 'q':
- fmt.Fprintf(s, "%q", w.Error())
- }
-}
-
-// Wrap returns an error annotating err with a stack trace
-// at the point Wrap is called, and the supplied message.
-// If err is nil, Wrap returns nil.
-func Wrap(err error, message string) error {
- if err == nil {
- return nil
- }
- err = &withMessage{
- cause: err,
- msg: message,
- }
- return &withStack{
- err,
- callers(),
- }
-}
-
-// Wrapf returns an error annotating err with a stack trace
-// at the point Wrapf is called, and the format specifier.
-// If err is nil, Wrapf returns nil.
-func Wrapf(err error, format string, args ...interface{}) error {
- if err == nil {
- return nil
- }
- err = &withMessage{
- cause: err,
- msg: fmt.Sprintf(format, args...),
- }
- return &withStack{
- err,
- callers(),
- }
-}
-
-// WithMessage annotates err with a new message.
-// If err is nil, WithMessage returns nil.
-func WithMessage(err error, message string) error {
- if err == nil {
- return nil
- }
- return &withMessage{
- cause: err,
- msg: message,
- }
-}
-
-// WithMessagef annotates err with the format specifier.
-// If err is nil, WithMessagef returns nil.
-func WithMessagef(err error, format string, args ...interface{}) error {
- if err == nil {
- return nil
- }
- return &withMessage{
- cause: err,
- msg: fmt.Sprintf(format, args...),
- }
-}
-
-type withMessage struct {
- cause error
- msg string
-}
-
-func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
-func (w *withMessage) Cause() error { return w.cause }
-
-// Unwrap provides compatibility for Go 1.13 error chains.
-func (w *withMessage) Unwrap() error { return w.cause }
-
-func (w *withMessage) Format(s fmt.State, verb rune) {
- switch verb {
- case 'v':
- if s.Flag('+') {
- fmt.Fprintf(s, "%+v\n", w.Cause())
- io.WriteString(s, w.msg)
- return
- }
- fallthrough
- case 's', 'q':
- io.WriteString(s, w.Error())
- }
-}
-
-// Cause returns the underlying cause of the error, if possible.
-// An error value has a cause if it implements the following
-// interface:
-//
-// type causer interface {
-// Cause() error
-// }
-//
-// If the error does not implement Cause, the original error will
-// be returned. If the error is nil, nil will be returned without further
-// investigation.
-func Cause(err error) error {
- type causer interface {
- Cause() error
- }
-
- for err != nil {
- cause, ok := err.(causer)
- if !ok {
- break
- }
- err = cause.Cause()
- }
- return err
-}
diff --git a/test/tools/vendor/github.com/pkg/errors/go113.go b/test/tools/vendor/github.com/pkg/errors/go113.go
deleted file mode 100644
index be0d10d0c79..00000000000
--- a/test/tools/vendor/github.com/pkg/errors/go113.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// +build go1.13
-
-package errors
-
-import (
- stderrors "errors"
-)
-
-// Is reports whether any error in err's chain matches target.
-//
-// The chain consists of err itself followed by the sequence of errors obtained by
-// repeatedly calling Unwrap.
-//
-// An error is considered to match a target if it is equal to that target or if
-// it implements a method Is(error) bool such that Is(target) returns true.
-func Is(err, target error) bool { return stderrors.Is(err, target) }
-
-// As finds the first error in err's chain that matches target, and if so, sets
-// target to that error value and returns true.
-//
-// The chain consists of err itself followed by the sequence of errors obtained by
-// repeatedly calling Unwrap.
-//
-// An error matches target if the error's concrete value is assignable to the value
-// pointed to by target, or if the error has a method As(interface{}) bool such that
-// As(target) returns true. In the latter case, the As method is responsible for
-// setting target.
-//
-// As will panic if target is not a non-nil pointer to either a type that implements
-// error, or to any interface type. As returns false if err is nil.
-func As(err error, target interface{}) bool { return stderrors.As(err, target) }
-
-// Unwrap returns the result of calling the Unwrap method on err, if err's
-// type contains an Unwrap method returning error.
-// Otherwise, Unwrap returns nil.
-func Unwrap(err error) error {
- return stderrors.Unwrap(err)
-}
diff --git a/test/tools/vendor/github.com/pkg/errors/stack.go b/test/tools/vendor/github.com/pkg/errors/stack.go
deleted file mode 100644
index 779a8348fb9..00000000000
--- a/test/tools/vendor/github.com/pkg/errors/stack.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package errors
-
-import (
- "fmt"
- "io"
- "path"
- "runtime"
- "strconv"
- "strings"
-)
-
-// Frame represents a program counter inside a stack frame.
-// For historical reasons if Frame is interpreted as a uintptr
-// its value represents the program counter + 1.
-type Frame uintptr
-
-// pc returns the program counter for this frame;
-// multiple frames may have the same PC value.
-func (f Frame) pc() uintptr { return uintptr(f) - 1 }
-
-// file returns the full path to the file that contains the
-// function for this Frame's pc.
-func (f Frame) file() string {
- fn := runtime.FuncForPC(f.pc())
- if fn == nil {
- return "unknown"
- }
- file, _ := fn.FileLine(f.pc())
- return file
-}
-
-// line returns the line number of source code of the
-// function for this Frame's pc.
-func (f Frame) line() int {
- fn := runtime.FuncForPC(f.pc())
- if fn == nil {
- return 0
- }
- _, line := fn.FileLine(f.pc())
- return line
-}
-
-// name returns the name of this function, if known.
-func (f Frame) name() string {
- fn := runtime.FuncForPC(f.pc())
- if fn == nil {
- return "unknown"
- }
- return fn.Name()
-}
-
-// Format formats the frame according to the fmt.Formatter interface.
-//
-// %s source file
-// %d source line
-// %n function name
-// %v equivalent to %s:%d
-//
-// Format accepts flags that alter the printing of some verbs, as follows:
-//
-// %+s function name and path of source file relative to the compile time
-// GOPATH separated by \n\t (\n\t)
-// %+v equivalent to %+s:%d
-func (f Frame) Format(s fmt.State, verb rune) {
- switch verb {
- case 's':
- switch {
- case s.Flag('+'):
- io.WriteString(s, f.name())
- io.WriteString(s, "\n\t")
- io.WriteString(s, f.file())
- default:
- io.WriteString(s, path.Base(f.file()))
- }
- case 'd':
- io.WriteString(s, strconv.Itoa(f.line()))
- case 'n':
- io.WriteString(s, funcname(f.name()))
- case 'v':
- f.Format(s, 's')
- io.WriteString(s, ":")
- f.Format(s, 'd')
- }
-}
-
-// MarshalText formats a stacktrace Frame as a text string. The output is the
-// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs.
-func (f Frame) MarshalText() ([]byte, error) {
- name := f.name()
- if name == "unknown" {
- return []byte(name), nil
- }
- return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil
-}
-
-// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
-type StackTrace []Frame
-
-// Format formats the stack of Frames according to the fmt.Formatter interface.
-//
-// %s lists source files for each Frame in the stack
-// %v lists the source file and line number for each Frame in the stack
-//
-// Format accepts flags that alter the printing of some verbs, as follows:
-//
-// %+v Prints filename, function, and line number for each Frame in the stack.
-func (st StackTrace) Format(s fmt.State, verb rune) {
- switch verb {
- case 'v':
- switch {
- case s.Flag('+'):
- for _, f := range st {
- io.WriteString(s, "\n")
- f.Format(s, verb)
- }
- case s.Flag('#'):
- fmt.Fprintf(s, "%#v", []Frame(st))
- default:
- st.formatSlice(s, verb)
- }
- case 's':
- st.formatSlice(s, verb)
- }
-}
-
-// formatSlice will format this StackTrace into the given buffer as a slice of
-// Frame, only valid when called with '%s' or '%v'.
-func (st StackTrace) formatSlice(s fmt.State, verb rune) {
- io.WriteString(s, "[")
- for i, f := range st {
- if i > 0 {
- io.WriteString(s, " ")
- }
- f.Format(s, verb)
- }
- io.WriteString(s, "]")
-}
-
-// stack represents a stack of program counters.
-type stack []uintptr
-
-func (s *stack) Format(st fmt.State, verb rune) {
- switch verb {
- case 'v':
- switch {
- case st.Flag('+'):
- for _, pc := range *s {
- f := Frame(pc)
- fmt.Fprintf(st, "\n%+v", f)
- }
- }
- }
-}
-
-func (s *stack) StackTrace() StackTrace {
- f := make([]Frame, len(*s))
- for i := 0; i < len(f); i++ {
- f[i] = Frame((*s)[i])
- }
- return f
-}
-
-func callers() *stack {
- const depth = 32
- var pcs [depth]uintptr
- n := runtime.Callers(3, pcs[:])
- var st stack = pcs[0:n]
- return &st
-}
-
-// funcname removes the path prefix component of a function's name reported by func.Name().
-func funcname(name string) string {
- i := strings.LastIndex(name, "/")
- name = name[i+1:]
- i = strings.Index(name, ".")
- return name[i+1:]
-}
diff --git a/test/tools/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem.go b/test/tools/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem.go
index 4e4c29459ed..98e4e38f4aa 100644
--- a/test/tools/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem.go
+++ b/test/tools/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem.go
@@ -1,6 +1,3 @@
-//go:build go1.12
-// +build go1.12
-
package fmtsort
import "reflect"
diff --git a/test/tools/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem_1.11.go b/test/tools/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem_1.11.go
deleted file mode 100644
index 873bf7f5e8e..00000000000
--- a/test/tools/vendor/github.com/rogpeppe/go-internal/fmtsort/mapelem_1.11.go
+++ /dev/null
@@ -1,24 +0,0 @@
-//go:build !go1.12
-// +build !go1.12
-
-package fmtsort
-
-import "reflect"
-
-const brokenNaNs = true
-
-func mapElems(mapValue reflect.Value) ([]reflect.Value, []reflect.Value) {
- key := mapValue.MapKeys()
- value := make([]reflect.Value, 0, len(key))
- for _, k := range key {
- v := mapValue.MapIndex(k)
- if !v.IsValid() {
- // Note: we can't retrieve the value, probably because
- // the key is NaN, so just do the best we can and
- // add a zero value of the correct type in that case.
- v = reflect.Zero(mapValue.Type().Elem())
- }
- value = append(value, v)
- }
- return key, value
-}
diff --git a/test/tools/vendor/github.com/rogpeppe/go-internal/fmtsort/sort.go b/test/tools/vendor/github.com/rogpeppe/go-internal/fmtsort/sort.go
index 0fb5187dd84..7f51854178f 100644
--- a/test/tools/vendor/github.com/rogpeppe/go-internal/fmtsort/sort.go
+++ b/test/tools/vendor/github.com/rogpeppe/go-internal/fmtsort/sort.go
@@ -36,19 +36,18 @@ func (o *SortedMap) Swap(i, j int) {
//
// The ordering rules are more general than with Go's < operator:
//
-// - when applicable, nil compares low
-// - ints, floats, and strings order by <
-// - NaN compares less than non-NaN floats
-// - bool compares false before true
-// - complex compares real, then imag
-// - pointers compare by machine address
-// - channel values compare by machine address
-// - structs compare each field in turn
-// - arrays compare each element in turn.
-// Otherwise identical arrays compare by length.
-// - interface values compare first by reflect.Type describing the concrete type
-// and then by concrete value as described in the previous rules.
-//
+// - when applicable, nil compares low
+// - ints, floats, and strings order by <
+// - NaN compares less than non-NaN floats
+// - bool compares false before true
+// - complex compares real, then imag
+// - pointers compare by machine address
+// - channel values compare by machine address
+// - structs compare each field in turn
+// - arrays compare each element in turn.
+// Otherwise identical arrays compare by length.
+// - interface values compare first by reflect.Type describing the concrete type
+// and then by concrete value as described in the previous rules.
func Sort(mapValue reflect.Value) *SortedMap {
if mapValue.Type().Kind() != reflect.Map {
return nil
diff --git a/test/tools/vendor/github.com/sagikazarmark/locafero/.editorconfig b/test/tools/vendor/github.com/sagikazarmark/locafero/.editorconfig
new file mode 100644
index 00000000000..6f944f5406c
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/locafero/.editorconfig
@@ -0,0 +1,21 @@
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+indent_size = 4
+indent_style = space
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+[{Makefile,*.mk}]
+indent_style = tab
+
+[*.nix]
+indent_size = 2
+
+[*.go]
+indent_style = tab
+
+[{*.yml,*.yaml}]
+indent_size = 2
diff --git a/test/tools/vendor/github.com/sagikazarmark/locafero/.envrc b/test/tools/vendor/github.com/sagikazarmark/locafero/.envrc
new file mode 100644
index 00000000000..3ce7171a3c5
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/locafero/.envrc
@@ -0,0 +1,4 @@
+if ! has nix_direnv_version || ! nix_direnv_version 2.3.0; then
+ source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.3.0/direnvrc" "sha256-Dmd+j63L84wuzgyjITIfSxSD57Tx7v51DMxVZOsiUD8="
+fi
+use flake . --impure
diff --git a/test/tools/vendor/github.com/sagikazarmark/locafero/.gitignore b/test/tools/vendor/github.com/sagikazarmark/locafero/.gitignore
new file mode 100644
index 00000000000..8f07e601636
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/locafero/.gitignore
@@ -0,0 +1,8 @@
+/.devenv/
+/.direnv/
+/.task/
+/bin/
+/build/
+/tmp/
+/var/
+/vendor/
diff --git a/test/tools/vendor/github.com/sagikazarmark/locafero/.golangci.yaml b/test/tools/vendor/github.com/sagikazarmark/locafero/.golangci.yaml
new file mode 100644
index 00000000000..829de2a4a01
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/locafero/.golangci.yaml
@@ -0,0 +1,27 @@
+run:
+ timeout: 10m
+
+linters-settings:
+ gci:
+ sections:
+ - standard
+ - default
+ - prefix(github.com/sagikazarmark/locafero)
+ goimports:
+ local-prefixes: github.com/sagikazarmark/locafero
+ misspell:
+ locale: US
+ nolintlint:
+ allow-leading-space: false # require machine-readable nolint directives (with no leading space)
+ allow-unused: false # report any unused nolint directives
+ require-specific: false # don't require nolint directives to be specific about which linter is being skipped
+ revive:
+ confidence: 0
+
+linters:
+ enable:
+ - gci
+ - goimports
+ - misspell
+ - nolintlint
+ - revive
diff --git a/test/tools/vendor/github.com/sagikazarmark/locafero/LICENSE b/test/tools/vendor/github.com/sagikazarmark/locafero/LICENSE
new file mode 100644
index 00000000000..a70b0f2960f
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/locafero/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2023 Márk Sági-Kazár
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is furnished
+to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/test/tools/vendor/github.com/sagikazarmark/locafero/README.md b/test/tools/vendor/github.com/sagikazarmark/locafero/README.md
new file mode 100644
index 00000000000..a48e8e9789e
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/locafero/README.md
@@ -0,0 +1,37 @@
+# Finder library for [Afero](https://github.com/spf13/afero)
+
+[](https://github.com/sagikazarmark/locafero/actions/workflows/ci.yaml)
+[](https://pkg.go.dev/mod/github.com/sagikazarmark/locafero)
+
+[](https://builtwithnix.org)
+
+**Finder library for [Afero](https://github.com/spf13/afero) ported from [go-finder](https://github.com/sagikazarmark/go-finder).**
+
+> [!WARNING]
+> This is an experimental library under development.
+>
+> **Backwards compatibility is not guaranteed, expect breaking changes.**
+
+## Installation
+
+```shell
+go get github.com/sagikazarmark/locafero
+```
+
+## Usage
+
+Check out the [package example](https://pkg.go.dev/github.com/sagikazarmark/locafero#example-package) on go.dev.
+
+## Development
+
+**For an optimal developer experience, it is recommended to install [Nix](https://nixos.org/download.html) and [direnv](https://direnv.net/docs/installation.html).**
+
+Run the test suite:
+
+```shell
+just test
+```
+
+## License
+
+The project is licensed under the [MIT License](LICENSE).
diff --git a/test/tools/vendor/github.com/sagikazarmark/locafero/file_type.go b/test/tools/vendor/github.com/sagikazarmark/locafero/file_type.go
new file mode 100644
index 00000000000..9a9b140233f
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/locafero/file_type.go
@@ -0,0 +1,28 @@
+package locafero
+
+import "io/fs"
+
+// FileType represents the kind of entries [Finder] can return.
+type FileType int
+
+const (
+ FileTypeAll FileType = iota
+ FileTypeFile
+ FileTypeDir
+)
+
+func (ft FileType) matchFileInfo(info fs.FileInfo) bool {
+ switch ft {
+ case FileTypeAll:
+ return true
+
+ case FileTypeFile:
+ return !info.IsDir()
+
+ case FileTypeDir:
+ return info.IsDir()
+
+ default:
+ return false
+ }
+}
diff --git a/test/tools/vendor/github.com/sagikazarmark/locafero/finder.go b/test/tools/vendor/github.com/sagikazarmark/locafero/finder.go
new file mode 100644
index 00000000000..754c8b260e6
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/locafero/finder.go
@@ -0,0 +1,165 @@
+// Package locafero looks for files and directories in an [afero.Fs] filesystem.
+package locafero
+
+import (
+ "errors"
+ "io/fs"
+ "path/filepath"
+ "strings"
+
+ "github.com/sourcegraph/conc/iter"
+ "github.com/spf13/afero"
+)
+
+// Finder looks for files and directories in an [afero.Fs] filesystem.
+type Finder struct {
+ // Paths represents a list of locations that the [Finder] will search in.
+ //
+ // They are essentially the root directories or starting points for the search.
+ //
+ // Examples:
+ // - home/user
+ // - etc
+ Paths []string
+
+ // Names are specific entries that the [Finder] will look for within the given Paths.
+ //
+ // It provides the capability to search for entries with depth,
+ // meaning it can target deeper locations within the directory structure.
+ //
+ // It also supports glob syntax (as defined by [filepath.Match]), offering greater flexibility in search patterns.
+ //
+ // Examples:
+ // - config.yaml
+ // - home/*/config.yaml
+ // - home/*/config.*
+ Names []string
+
+ // Type restricts the kind of entries returned by the [Finder].
+ //
+ // This parameter helps in differentiating and filtering out files from directories or vice versa.
+ Type FileType
+}
+
+// Find looks for files and directories in an [afero.Fs] filesystem.
+func (f Finder) Find(fsys afero.Fs) ([]string, error) {
+ // Arbitrary go routine limit (TODO: make this a parameter)
+ // pool := pool.NewWithResults[[]string]().WithMaxGoroutines(5).WithErrors().WithFirstError()
+
+ type searchItem struct {
+ path string
+ name string
+ }
+
+ var searchItems []searchItem
+
+ for _, searchPath := range f.Paths {
+ searchPath := searchPath
+
+ for _, searchName := range f.Names {
+ searchName := searchName
+
+ searchItems = append(searchItems, searchItem{searchPath, searchName})
+
+ // pool.Go(func() ([]string, error) {
+ // // If the name contains any glob character, perform a glob match
+ // if strings.ContainsAny(searchName, "*?[]\\^") {
+ // return globWalkSearch(fsys, searchPath, searchName, f.Type)
+ // }
+ //
+ // return statSearch(fsys, searchPath, searchName, f.Type)
+ // })
+ }
+ }
+
+ // allResults, err := pool.Wait()
+ // if err != nil {
+ // return nil, err
+ // }
+
+ allResults, err := iter.MapErr(searchItems, func(item *searchItem) ([]string, error) {
+ // If the name contains any glob character, perform a glob match
+ if strings.ContainsAny(item.name, "*?[]\\^") {
+ return globWalkSearch(fsys, item.path, item.name, f.Type)
+ }
+
+ return statSearch(fsys, item.path, item.name, f.Type)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ var results []string
+
+ for _, r := range allResults {
+ results = append(results, r...)
+ }
+
+ // Sort results in alphabetical order for now
+ // sort.Strings(results)
+
+ return results, nil
+}
+
+func globWalkSearch(fsys afero.Fs, searchPath string, searchName string, searchType FileType) ([]string, error) {
+ var results []string
+
+ err := afero.Walk(fsys, searchPath, func(p string, fileInfo fs.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Skip the root path
+ if p == searchPath {
+ return nil
+ }
+
+ var result error
+
+ // Stop reading subdirectories
+ // TODO: add depth detection here
+ if fileInfo.IsDir() && filepath.Dir(p) == searchPath {
+ result = fs.SkipDir
+ }
+
+ // Skip unmatching type
+ if !searchType.matchFileInfo(fileInfo) {
+ return result
+ }
+
+ match, err := filepath.Match(searchName, fileInfo.Name())
+ if err != nil {
+ return err
+ }
+
+ if match {
+ results = append(results, p)
+ }
+
+ return result
+ })
+ if err != nil {
+ return results, err
+ }
+
+ return results, nil
+}
+
+func statSearch(fsys afero.Fs, searchPath string, searchName string, searchType FileType) ([]string, error) {
+ filePath := filepath.Join(searchPath, searchName)
+
+ fileInfo, err := fsys.Stat(filePath)
+ if errors.Is(err, fs.ErrNotExist) {
+ return nil, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // Skip unmatching type
+ if !searchType.matchFileInfo(fileInfo) {
+ return nil, nil
+ }
+
+ return []string{filePath}, nil
+}
diff --git a/test/tools/vendor/github.com/sagikazarmark/locafero/flake.lock b/test/tools/vendor/github.com/sagikazarmark/locafero/flake.lock
new file mode 100644
index 00000000000..46d28f805ad
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/locafero/flake.lock
@@ -0,0 +1,273 @@
+{
+ "nodes": {
+ "devenv": {
+ "inputs": {
+ "flake-compat": "flake-compat",
+ "nix": "nix",
+ "nixpkgs": "nixpkgs",
+ "pre-commit-hooks": "pre-commit-hooks"
+ },
+ "locked": {
+ "lastModified": 1694097209,
+ "narHash": "sha256-gQmBjjxeSyySjbh0yQVBKApo2KWIFqqbRUvG+Fa+QpM=",
+ "owner": "cachix",
+ "repo": "devenv",
+ "rev": "7a8e6a91510efe89d8dcb8e43233f93e86f6b189",
+ "type": "github"
+ },
+ "original": {
+ "owner": "cachix",
+ "repo": "devenv",
+ "type": "github"
+ }
+ },
+ "flake-compat": {
+ "flake": false,
+ "locked": {
+ "lastModified": 1673956053,
+ "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
+ "owner": "edolstra",
+ "repo": "flake-compat",
+ "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
+ "type": "github"
+ },
+ "original": {
+ "owner": "edolstra",
+ "repo": "flake-compat",
+ "type": "github"
+ }
+ },
+ "flake-parts": {
+ "inputs": {
+ "nixpkgs-lib": "nixpkgs-lib"
+ },
+ "locked": {
+ "lastModified": 1693611461,
+ "narHash": "sha256-aPODl8vAgGQ0ZYFIRisxYG5MOGSkIczvu2Cd8Gb9+1Y=",
+ "owner": "hercules-ci",
+ "repo": "flake-parts",
+ "rev": "7f53fdb7bdc5bb237da7fefef12d099e4fd611ca",
+ "type": "github"
+ },
+ "original": {
+ "owner": "hercules-ci",
+ "repo": "flake-parts",
+ "type": "github"
+ }
+ },
+ "flake-utils": {
+ "inputs": {
+ "systems": "systems"
+ },
+ "locked": {
+ "lastModified": 1685518550,
+ "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=",
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef",
+ "type": "github"
+ },
+ "original": {
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "type": "github"
+ }
+ },
+ "gitignore": {
+ "inputs": {
+ "nixpkgs": [
+ "devenv",
+ "pre-commit-hooks",
+ "nixpkgs"
+ ]
+ },
+ "locked": {
+ "lastModified": 1660459072,
+ "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=",
+ "owner": "hercules-ci",
+ "repo": "gitignore.nix",
+ "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73",
+ "type": "github"
+ },
+ "original": {
+ "owner": "hercules-ci",
+ "repo": "gitignore.nix",
+ "type": "github"
+ }
+ },
+ "lowdown-src": {
+ "flake": false,
+ "locked": {
+ "lastModified": 1633514407,
+ "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=",
+ "owner": "kristapsdz",
+ "repo": "lowdown",
+ "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8",
+ "type": "github"
+ },
+ "original": {
+ "owner": "kristapsdz",
+ "repo": "lowdown",
+ "type": "github"
+ }
+ },
+ "nix": {
+ "inputs": {
+ "lowdown-src": "lowdown-src",
+ "nixpkgs": [
+ "devenv",
+ "nixpkgs"
+ ],
+ "nixpkgs-regression": "nixpkgs-regression"
+ },
+ "locked": {
+ "lastModified": 1676545802,
+ "narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=",
+ "owner": "domenkozar",
+ "repo": "nix",
+ "rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f",
+ "type": "github"
+ },
+ "original": {
+ "owner": "domenkozar",
+ "ref": "relaxed-flakes",
+ "repo": "nix",
+ "type": "github"
+ }
+ },
+ "nixpkgs": {
+ "locked": {
+ "lastModified": 1678875422,
+ "narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "nixpkgs-unstable",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "nixpkgs-lib": {
+ "locked": {
+ "dir": "lib",
+ "lastModified": 1693471703,
+ "narHash": "sha256-0l03ZBL8P1P6z8MaSDS/MvuU8E75rVxe5eE1N6gxeTo=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "3e52e76b70d5508f3cec70b882a29199f4d1ee85",
+ "type": "github"
+ },
+ "original": {
+ "dir": "lib",
+ "owner": "NixOS",
+ "ref": "nixos-unstable",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "nixpkgs-regression": {
+ "locked": {
+ "lastModified": 1643052045,
+ "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
+ "type": "github"
+ }
+ },
+ "nixpkgs-stable": {
+ "locked": {
+ "lastModified": 1685801374,
+ "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "c37ca420157f4abc31e26f436c1145f8951ff373",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "nixos-23.05",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "nixpkgs_2": {
+ "locked": {
+ "lastModified": 1694343207,
+ "narHash": "sha256-jWi7OwFxU5Owi4k2JmiL1sa/OuBCQtpaAesuj5LXC8w=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "78058d810644f5ed276804ce7ea9e82d92bee293",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "nixpkgs-unstable",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "pre-commit-hooks": {
+ "inputs": {
+ "flake-compat": [
+ "devenv",
+ "flake-compat"
+ ],
+ "flake-utils": "flake-utils",
+ "gitignore": "gitignore",
+ "nixpkgs": [
+ "devenv",
+ "nixpkgs"
+ ],
+ "nixpkgs-stable": "nixpkgs-stable"
+ },
+ "locked": {
+ "lastModified": 1688056373,
+ "narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=",
+ "owner": "cachix",
+ "repo": "pre-commit-hooks.nix",
+ "rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7",
+ "type": "github"
+ },
+ "original": {
+ "owner": "cachix",
+ "repo": "pre-commit-hooks.nix",
+ "type": "github"
+ }
+ },
+ "root": {
+ "inputs": {
+ "devenv": "devenv",
+ "flake-parts": "flake-parts",
+ "nixpkgs": "nixpkgs_2"
+ }
+ },
+ "systems": {
+ "locked": {
+ "lastModified": 1681028828,
+ "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+ "owner": "nix-systems",
+ "repo": "default",
+ "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+ "type": "github"
+ },
+ "original": {
+ "owner": "nix-systems",
+ "repo": "default",
+ "type": "github"
+ }
+ }
+ },
+ "root": "root",
+ "version": 7
+}
diff --git a/test/tools/vendor/github.com/sagikazarmark/locafero/flake.nix b/test/tools/vendor/github.com/sagikazarmark/locafero/flake.nix
new file mode 100644
index 00000000000..209ecf28601
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/locafero/flake.nix
@@ -0,0 +1,47 @@
+{
+ description = "Finder library for Afero";
+
+ inputs = {
+ nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
+ flake-parts.url = "github:hercules-ci/flake-parts";
+ devenv.url = "github:cachix/devenv";
+ };
+
+ outputs = inputs@{ flake-parts, ... }:
+ flake-parts.lib.mkFlake { inherit inputs; } {
+ imports = [
+ inputs.devenv.flakeModule
+ ];
+
+ systems = [ "x86_64-linux" "aarch64-darwin" ];
+
+ perSystem = { config, self', inputs', pkgs, system, ... }: rec {
+ devenv.shells = {
+ default = {
+ languages = {
+ go.enable = true;
+ };
+
+ packages = with pkgs; [
+ just
+
+ golangci-lint
+ ];
+
+ # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767
+ containers = pkgs.lib.mkForce { };
+ };
+
+ ci = devenv.shells.default;
+
+ ci_1_20 = {
+ imports = [ devenv.shells.ci ];
+
+ languages = {
+ go.package = pkgs.go_1_20;
+ };
+ };
+ };
+ };
+ };
+}
diff --git a/test/tools/vendor/github.com/sagikazarmark/locafero/helpers.go b/test/tools/vendor/github.com/sagikazarmark/locafero/helpers.go
new file mode 100644
index 00000000000..05b434481f4
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/locafero/helpers.go
@@ -0,0 +1,41 @@
+package locafero
+
+import "fmt"
+
+// NameWithExtensions creates a list of names from a base name and a list of extensions.
+//
+// TODO: find a better name for this function.
+func NameWithExtensions(baseName string, extensions ...string) []string {
+ var names []string
+
+ if baseName == "" {
+ return names
+ }
+
+ for _, ext := range extensions {
+ if ext == "" {
+ continue
+ }
+
+ names = append(names, fmt.Sprintf("%s.%s", baseName, ext))
+ }
+
+ return names
+}
+
+// NameWithOptionalExtensions creates a list of names from a base name and a list of extensions,
+// plus it adds the base name (without any extensions) to the end of the list.
+//
+// TODO: find a better name for this function.
+func NameWithOptionalExtensions(baseName string, extensions ...string) []string {
+ var names []string
+
+ if baseName == "" {
+ return names
+ }
+
+ names = NameWithExtensions(baseName, extensions...)
+ names = append(names, baseName)
+
+ return names
+}
diff --git a/test/tools/vendor/github.com/sagikazarmark/locafero/justfile b/test/tools/vendor/github.com/sagikazarmark/locafero/justfile
new file mode 100644
index 00000000000..00a88850cc8
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/locafero/justfile
@@ -0,0 +1,11 @@
+default:
+ just --list
+
+test:
+ go test -race -v ./...
+
+lint:
+ golangci-lint run
+
+fmt:
+ golangci-lint run --fix
diff --git a/test/tools/vendor/github.com/sagikazarmark/slog-shim/.editorconfig b/test/tools/vendor/github.com/sagikazarmark/slog-shim/.editorconfig
new file mode 100644
index 00000000000..1fb0e1bec60
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/slog-shim/.editorconfig
@@ -0,0 +1,18 @@
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+indent_size = 4
+indent_style = space
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+[*.nix]
+indent_size = 2
+
+[{Makefile,*.mk}]
+indent_style = tab
+
+[Taskfile.yaml]
+indent_size = 2
diff --git a/test/tools/vendor/github.com/sagikazarmark/slog-shim/.envrc b/test/tools/vendor/github.com/sagikazarmark/slog-shim/.envrc
new file mode 100644
index 00000000000..3ce7171a3c5
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/slog-shim/.envrc
@@ -0,0 +1,4 @@
+if ! has nix_direnv_version || ! nix_direnv_version 2.3.0; then
+ source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.3.0/direnvrc" "sha256-Dmd+j63L84wuzgyjITIfSxSD57Tx7v51DMxVZOsiUD8="
+fi
+use flake . --impure
diff --git a/test/tools/vendor/github.com/sagikazarmark/slog-shim/.gitignore b/test/tools/vendor/github.com/sagikazarmark/slog-shim/.gitignore
new file mode 100644
index 00000000000..dc6d8b5875f
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/slog-shim/.gitignore
@@ -0,0 +1,4 @@
+/.devenv/
+/.direnv/
+/.task/
+/build/
diff --git a/test/tools/vendor/github.com/sagikazarmark/slog-shim/LICENSE b/test/tools/vendor/github.com/sagikazarmark/slog-shim/LICENSE
new file mode 100644
index 00000000000..6a66aea5eaf
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/slog-shim/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/test/tools/vendor/github.com/sagikazarmark/slog-shim/README.md b/test/tools/vendor/github.com/sagikazarmark/slog-shim/README.md
new file mode 100644
index 00000000000..1f5be85e101
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/slog-shim/README.md
@@ -0,0 +1,81 @@
+# [slog](https://pkg.go.dev/log/slog) shim
+
+[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/sagikazarmark/slog-shim/ci.yaml?style=flat-square)](https://github.com/sagikazarmark/slog-shim/actions/workflows/ci.yaml)
+[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/sagikazarmark/slog-shim)
+
+[![built with nix](https://img.shields.io/badge/builtwith-nix-7d81f7?style=flat-square)](https://builtwithnix.org)
+
+Go 1.21 introduced a [new structured logging package](https://golang.org/doc/go1.21#slog), `log/slog`, to the standard library.
+Although it's been eagerly anticipated by many, widespread adoption isn't expected to occur immediately,
+especially since updating to Go 1.21 is a decision that most libraries won't make overnight.
+
+Before this package was added to the standard library, there was an _experimental_ version available at [golang.org/x/exp/slog](https://pkg.go.dev/golang.org/x/exp/slog).
+While it's generally advised against using experimental packages in production,
+this one served as a sort of backport package for the last few years,
+incorporating new features before they were added to the standard library (like `slices`, `maps` or `errors`).
+
+This package serves as a bridge, helping libraries integrate slog in a backward-compatible way without having to immediately update their Go version requirement to 1.21. On Go 1.21 (and above), it acts as a drop-in replacement for `log/slog`, while below 1.21 it falls back to `golang.org/x/exp/slog`.
+
+**How does it achieve backwards compatibility?**
+
+Although there's no consensus on whether dropping support for older Go versions is considered backward compatible, a majority seems to believe it is.
+(I don't have scientific proof for this, but it's based on conversations with various individuals across different channels.)
+
+This package adheres to that interpretation of backward compatibility. On Go 1.21, the shim uses type aliases to offer the same API as `log/slog`.
+Once a library upgrades its version requirement to Go 1.21, it should be able to discard this shim and use `log/slog` directly.
+
+For older Go versions, the library might become unstable after removing the shim.
+However, since those older versions are no longer supported, the promise of backward compatibility remains intact.
+
+## Installation
+
+```shell
+go get github.com/sagikazarmark/slog-shim
+```
+
+## Usage
+
+Import this package into your library and use it in your public API:
+
+```go
+package mylib
+
+import slog "github.com/sagikazarmark/slog-shim"
+
+func New(logger *slog.Logger) MyLib {
+ // ...
+}
+```
+
+When using the library, clients can either use `log/slog` (when on Go 1.21) or `golang.org/x/exp/slog` (below Go 1.21):
+
+```go
+package main
+
+import "log/slog"
+
+// OR
+
+import "golang.org/x/exp/slog"
+
+mylib.New(slog.Default())
+```
+
+**Make sure consumers are aware that your API behaves differently on different Go versions.**
+
+Once you bump your Go version requirement to Go 1.21, you can drop the shim entirely from your code:
+
+```diff
+package mylib
+
+- import slog "github.com/sagikazarmark/slog-shim"
++ import "log/slog"
+
+func New(logger *slog.Logger) MyLib {
+ // ...
+}
+```
+
+## License
+
+The project is licensed under a [BSD-style license](LICENSE).
diff --git a/test/tools/vendor/github.com/sagikazarmark/slog-shim/attr.go b/test/tools/vendor/github.com/sagikazarmark/slog-shim/attr.go
new file mode 100644
index 00000000000..89608bf3a75
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/slog-shim/attr.go
@@ -0,0 +1,74 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package slog
+
+import (
+ "log/slog"
+ "time"
+)
+
+// An Attr is a key-value pair.
+type Attr = slog.Attr
+
+// String returns an Attr for a string value.
+func String(key, value string) Attr {
+ return slog.String(key, value)
+}
+
+// Int64 returns an Attr for an int64.
+func Int64(key string, value int64) Attr {
+ return slog.Int64(key, value)
+}
+
+// Int converts an int to an int64 and returns
+// an Attr with that value.
+func Int(key string, value int) Attr {
+ return slog.Int(key, value)
+}
+
+// Uint64 returns an Attr for a uint64.
+func Uint64(key string, v uint64) Attr {
+ return slog.Uint64(key, v)
+}
+
+// Float64 returns an Attr for a floating-point number.
+func Float64(key string, v float64) Attr {
+ return slog.Float64(key, v)
+}
+
+// Bool returns an Attr for a bool.
+func Bool(key string, v bool) Attr {
+ return slog.Bool(key, v)
+}
+
+// Time returns an Attr for a time.Time.
+// It discards the monotonic portion.
+func Time(key string, v time.Time) Attr {
+ return slog.Time(key, v)
+}
+
+// Duration returns an Attr for a time.Duration.
+func Duration(key string, v time.Duration) Attr {
+ return slog.Duration(key, v)
+}
+
+// Group returns an Attr for a Group Value.
+// The first argument is the key; the remaining arguments
+// are converted to Attrs as in [Logger.Log].
+//
+// Use Group to collect several key-value pairs under a single
+// key on a log line, or as the result of LogValue
+// in order to log a single value as multiple Attrs.
+func Group(key string, args ...any) Attr {
+ return slog.Group(key, args...)
+}
+
+// Any returns an Attr for the supplied value.
+// See [Value.AnyValue] for how values are treated.
+func Any(key string, value any) Attr {
+ return slog.Any(key, value)
+}
diff --git a/test/tools/vendor/github.com/sagikazarmark/slog-shim/attr_120.go b/test/tools/vendor/github.com/sagikazarmark/slog-shim/attr_120.go
new file mode 100644
index 00000000000..b664813331e
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/slog-shim/attr_120.go
@@ -0,0 +1,75 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.21
+
+package slog
+
+import (
+ "time"
+
+ "golang.org/x/exp/slog"
+)
+
+// An Attr is a key-value pair.
+type Attr = slog.Attr
+
+// String returns an Attr for a string value.
+func String(key, value string) Attr {
+ return slog.String(key, value)
+}
+
+// Int64 returns an Attr for an int64.
+func Int64(key string, value int64) Attr {
+ return slog.Int64(key, value)
+}
+
+// Int converts an int to an int64 and returns
+// an Attr with that value.
+func Int(key string, value int) Attr {
+ return slog.Int(key, value)
+}
+
+// Uint64 returns an Attr for a uint64.
+func Uint64(key string, v uint64) Attr {
+ return slog.Uint64(key, v)
+}
+
+// Float64 returns an Attr for a floating-point number.
+func Float64(key string, v float64) Attr {
+ return slog.Float64(key, v)
+}
+
+// Bool returns an Attr for a bool.
+func Bool(key string, v bool) Attr {
+ return slog.Bool(key, v)
+}
+
+// Time returns an Attr for a time.Time.
+// It discards the monotonic portion.
+func Time(key string, v time.Time) Attr {
+ return slog.Time(key, v)
+}
+
+// Duration returns an Attr for a time.Duration.
+func Duration(key string, v time.Duration) Attr {
+ return slog.Duration(key, v)
+}
+
+// Group returns an Attr for a Group Value.
+// The first argument is the key; the remaining arguments
+// are converted to Attrs as in [Logger.Log].
+//
+// Use Group to collect several key-value pairs under a single
+// key on a log line, or as the result of LogValue
+// in order to log a single value as multiple Attrs.
+func Group(key string, args ...any) Attr {
+ return slog.Group(key, args...)
+}
+
+// Any returns an Attr for the supplied value.
+// See [Value.AnyValue] for how values are treated.
+func Any(key string, value any) Attr {
+ return slog.Any(key, value)
+}
diff --git a/test/tools/vendor/github.com/sagikazarmark/slog-shim/flake.lock b/test/tools/vendor/github.com/sagikazarmark/slog-shim/flake.lock
new file mode 100644
index 00000000000..7e8898e9e37
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/slog-shim/flake.lock
@@ -0,0 +1,273 @@
+{
+ "nodes": {
+ "devenv": {
+ "inputs": {
+ "flake-compat": "flake-compat",
+ "nix": "nix",
+ "nixpkgs": "nixpkgs",
+ "pre-commit-hooks": "pre-commit-hooks"
+ },
+ "locked": {
+ "lastModified": 1694097209,
+ "narHash": "sha256-gQmBjjxeSyySjbh0yQVBKApo2KWIFqqbRUvG+Fa+QpM=",
+ "owner": "cachix",
+ "repo": "devenv",
+ "rev": "7a8e6a91510efe89d8dcb8e43233f93e86f6b189",
+ "type": "github"
+ },
+ "original": {
+ "owner": "cachix",
+ "repo": "devenv",
+ "type": "github"
+ }
+ },
+ "flake-compat": {
+ "flake": false,
+ "locked": {
+ "lastModified": 1673956053,
+ "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
+ "owner": "edolstra",
+ "repo": "flake-compat",
+ "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
+ "type": "github"
+ },
+ "original": {
+ "owner": "edolstra",
+ "repo": "flake-compat",
+ "type": "github"
+ }
+ },
+ "flake-parts": {
+ "inputs": {
+ "nixpkgs-lib": "nixpkgs-lib"
+ },
+ "locked": {
+ "lastModified": 1693611461,
+ "narHash": "sha256-aPODl8vAgGQ0ZYFIRisxYG5MOGSkIczvu2Cd8Gb9+1Y=",
+ "owner": "hercules-ci",
+ "repo": "flake-parts",
+ "rev": "7f53fdb7bdc5bb237da7fefef12d099e4fd611ca",
+ "type": "github"
+ },
+ "original": {
+ "owner": "hercules-ci",
+ "repo": "flake-parts",
+ "type": "github"
+ }
+ },
+ "flake-utils": {
+ "inputs": {
+ "systems": "systems"
+ },
+ "locked": {
+ "lastModified": 1685518550,
+ "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=",
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef",
+ "type": "github"
+ },
+ "original": {
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "type": "github"
+ }
+ },
+ "gitignore": {
+ "inputs": {
+ "nixpkgs": [
+ "devenv",
+ "pre-commit-hooks",
+ "nixpkgs"
+ ]
+ },
+ "locked": {
+ "lastModified": 1660459072,
+ "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=",
+ "owner": "hercules-ci",
+ "repo": "gitignore.nix",
+ "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73",
+ "type": "github"
+ },
+ "original": {
+ "owner": "hercules-ci",
+ "repo": "gitignore.nix",
+ "type": "github"
+ }
+ },
+ "lowdown-src": {
+ "flake": false,
+ "locked": {
+ "lastModified": 1633514407,
+ "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=",
+ "owner": "kristapsdz",
+ "repo": "lowdown",
+ "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8",
+ "type": "github"
+ },
+ "original": {
+ "owner": "kristapsdz",
+ "repo": "lowdown",
+ "type": "github"
+ }
+ },
+ "nix": {
+ "inputs": {
+ "lowdown-src": "lowdown-src",
+ "nixpkgs": [
+ "devenv",
+ "nixpkgs"
+ ],
+ "nixpkgs-regression": "nixpkgs-regression"
+ },
+ "locked": {
+ "lastModified": 1676545802,
+ "narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=",
+ "owner": "domenkozar",
+ "repo": "nix",
+ "rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f",
+ "type": "github"
+ },
+ "original": {
+ "owner": "domenkozar",
+ "ref": "relaxed-flakes",
+ "repo": "nix",
+ "type": "github"
+ }
+ },
+ "nixpkgs": {
+ "locked": {
+ "lastModified": 1678875422,
+ "narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "nixpkgs-unstable",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "nixpkgs-lib": {
+ "locked": {
+ "dir": "lib",
+ "lastModified": 1693471703,
+ "narHash": "sha256-0l03ZBL8P1P6z8MaSDS/MvuU8E75rVxe5eE1N6gxeTo=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "3e52e76b70d5508f3cec70b882a29199f4d1ee85",
+ "type": "github"
+ },
+ "original": {
+ "dir": "lib",
+ "owner": "NixOS",
+ "ref": "nixos-unstable",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "nixpkgs-regression": {
+ "locked": {
+ "lastModified": 1643052045,
+ "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
+ "type": "github"
+ }
+ },
+ "nixpkgs-stable": {
+ "locked": {
+ "lastModified": 1685801374,
+ "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "c37ca420157f4abc31e26f436c1145f8951ff373",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "nixos-23.05",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "nixpkgs_2": {
+ "locked": {
+ "lastModified": 1694345580,
+ "narHash": "sha256-BbG0NUxQTz1dN/Y87yPWZc/0Kp/coJ0vM3+7sNa5kUM=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "f002de6834fdde9c864f33c1ec51da7df19cd832",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "master",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "pre-commit-hooks": {
+ "inputs": {
+ "flake-compat": [
+ "devenv",
+ "flake-compat"
+ ],
+ "flake-utils": "flake-utils",
+ "gitignore": "gitignore",
+ "nixpkgs": [
+ "devenv",
+ "nixpkgs"
+ ],
+ "nixpkgs-stable": "nixpkgs-stable"
+ },
+ "locked": {
+ "lastModified": 1688056373,
+ "narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=",
+ "owner": "cachix",
+ "repo": "pre-commit-hooks.nix",
+ "rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7",
+ "type": "github"
+ },
+ "original": {
+ "owner": "cachix",
+ "repo": "pre-commit-hooks.nix",
+ "type": "github"
+ }
+ },
+ "root": {
+ "inputs": {
+ "devenv": "devenv",
+ "flake-parts": "flake-parts",
+ "nixpkgs": "nixpkgs_2"
+ }
+ },
+ "systems": {
+ "locked": {
+ "lastModified": 1681028828,
+ "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+ "owner": "nix-systems",
+ "repo": "default",
+ "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+ "type": "github"
+ },
+ "original": {
+ "owner": "nix-systems",
+ "repo": "default",
+ "type": "github"
+ }
+ }
+ },
+ "root": "root",
+ "version": 7
+}
diff --git a/test/tools/vendor/github.com/sagikazarmark/slog-shim/flake.nix b/test/tools/vendor/github.com/sagikazarmark/slog-shim/flake.nix
new file mode 100644
index 00000000000..7239bbc2ec3
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/slog-shim/flake.nix
@@ -0,0 +1,57 @@
+{
+ inputs = {
+ # nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
+ nixpkgs.url = "github:NixOS/nixpkgs/master";
+ flake-parts.url = "github:hercules-ci/flake-parts";
+ devenv.url = "github:cachix/devenv";
+ };
+
+ outputs = inputs@{ flake-parts, ... }:
+ flake-parts.lib.mkFlake { inherit inputs; } {
+ imports = [
+ inputs.devenv.flakeModule
+ ];
+
+ systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ];
+
+ perSystem = { config, self', inputs', pkgs, system, ... }: rec {
+ devenv.shells = {
+ default = {
+ languages = {
+ go.enable = true;
+ go.package = pkgs.lib.mkDefault pkgs.go_1_21;
+ };
+
+ # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767
+ containers = pkgs.lib.mkForce { };
+ };
+
+ ci = devenv.shells.default;
+
+ ci_1_19 = {
+ imports = [ devenv.shells.ci ];
+
+ languages = {
+ go.package = pkgs.go_1_19;
+ };
+ };
+
+ ci_1_20 = {
+ imports = [ devenv.shells.ci ];
+
+ languages = {
+ go.package = pkgs.go_1_20;
+ };
+ };
+
+ ci_1_21 = {
+ imports = [ devenv.shells.ci ];
+
+ languages = {
+ go.package = pkgs.go_1_21;
+ };
+ };
+ };
+ };
+ };
+}
diff --git a/test/tools/vendor/github.com/sagikazarmark/slog-shim/handler.go b/test/tools/vendor/github.com/sagikazarmark/slog-shim/handler.go
new file mode 100644
index 00000000000..f55556ae186
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/slog-shim/handler.go
@@ -0,0 +1,45 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package slog
+
+import (
+ "log/slog"
+)
+
+// A Handler handles log records produced by a Logger..
+//
+// A typical handler may print log records to standard error,
+// or write them to a file or database, or perhaps augment them
+// with additional attributes and pass them on to another handler.
+//
+// Any of the Handler's methods may be called concurrently with itself
+// or with other methods. It is the responsibility of the Handler to
+// manage this concurrency.
+//
+// Users of the slog package should not invoke Handler methods directly.
+// They should use the methods of [Logger] instead.
+type Handler = slog.Handler
+
+// HandlerOptions are options for a TextHandler or JSONHandler.
+// A zero HandlerOptions consists entirely of default values.
+type HandlerOptions = slog.HandlerOptions
+
+// Keys for "built-in" attributes.
+const (
+ // TimeKey is the key used by the built-in handlers for the time
+ // when the log method is called. The associated Value is a [time.Time].
+ TimeKey = slog.TimeKey
+ // LevelKey is the key used by the built-in handlers for the level
+ // of the log call. The associated value is a [Level].
+ LevelKey = slog.LevelKey
+ // MessageKey is the key used by the built-in handlers for the
+ // message of the log call. The associated value is a string.
+ MessageKey = slog.MessageKey
+ // SourceKey is the key used by the built-in handlers for the source file
+ // and line of the log call. The associated value is a string.
+ SourceKey = slog.SourceKey
+)
diff --git a/test/tools/vendor/github.com/sagikazarmark/slog-shim/handler_120.go b/test/tools/vendor/github.com/sagikazarmark/slog-shim/handler_120.go
new file mode 100644
index 00000000000..670057573ff
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/slog-shim/handler_120.go
@@ -0,0 +1,45 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.21
+
+package slog
+
+import (
+ "golang.org/x/exp/slog"
+)
+
+// A Handler handles log records produced by a Logger..
+//
+// A typical handler may print log records to standard error,
+// or write them to a file or database, or perhaps augment them
+// with additional attributes and pass them on to another handler.
+//
+// Any of the Handler's methods may be called concurrently with itself
+// or with other methods. It is the responsibility of the Handler to
+// manage this concurrency.
+//
+// Users of the slog package should not invoke Handler methods directly.
+// They should use the methods of [Logger] instead.
+type Handler = slog.Handler
+
+// HandlerOptions are options for a TextHandler or JSONHandler.
+// A zero HandlerOptions consists entirely of default values.
+type HandlerOptions = slog.HandlerOptions
+
+// Keys for "built-in" attributes.
+const (
+ // TimeKey is the key used by the built-in handlers for the time
+ // when the log method is called. The associated Value is a [time.Time].
+ TimeKey = slog.TimeKey
+ // LevelKey is the key used by the built-in handlers for the level
+ // of the log call. The associated value is a [Level].
+ LevelKey = slog.LevelKey
+ // MessageKey is the key used by the built-in handlers for the
+ // message of the log call. The associated value is a string.
+ MessageKey = slog.MessageKey
+ // SourceKey is the key used by the built-in handlers for the source file
+ // and line of the log call. The associated value is a string.
+ SourceKey = slog.SourceKey
+)
diff --git a/test/tools/vendor/github.com/sagikazarmark/slog-shim/json_handler.go b/test/tools/vendor/github.com/sagikazarmark/slog-shim/json_handler.go
new file mode 100644
index 00000000000..7c22bd81e47
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/slog-shim/json_handler.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package slog
+
+import (
+ "io"
+ "log/slog"
+)
+
+// JSONHandler is a Handler that writes Records to an io.Writer as
+// line-delimited JSON objects.
+type JSONHandler = slog.JSONHandler
+
+// NewJSONHandler creates a JSONHandler that writes to w,
+// using the given options.
+// If opts is nil, the default options are used.
+func NewJSONHandler(w io.Writer, opts *HandlerOptions) *JSONHandler {
+ return slog.NewJSONHandler(w, opts)
+}
diff --git a/test/tools/vendor/github.com/sagikazarmark/slog-shim/json_handler_120.go b/test/tools/vendor/github.com/sagikazarmark/slog-shim/json_handler_120.go
new file mode 100644
index 00000000000..7b14f10ba94
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/slog-shim/json_handler_120.go
@@ -0,0 +1,24 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.21
+
+package slog
+
+import (
+ "io"
+
+ "golang.org/x/exp/slog"
+)
+
+// JSONHandler is a Handler that writes Records to an io.Writer as
+// line-delimited JSON objects.
+type JSONHandler = slog.JSONHandler
+
+// NewJSONHandler creates a JSONHandler that writes to w,
+// using the given options.
+// If opts is nil, the default options are used.
+func NewJSONHandler(w io.Writer, opts *HandlerOptions) *JSONHandler {
+ return slog.NewJSONHandler(w, opts)
+}
diff --git a/test/tools/vendor/github.com/sagikazarmark/slog-shim/level.go b/test/tools/vendor/github.com/sagikazarmark/slog-shim/level.go
new file mode 100644
index 00000000000..07288cf8911
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/slog-shim/level.go
@@ -0,0 +1,61 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package slog
+
+import (
+ "log/slog"
+)
+
+// A Level is the importance or severity of a log event.
+// The higher the level, the more important or severe the event.
+type Level = slog.Level
+
+// Level numbers are inherently arbitrary,
+// but we picked them to satisfy three constraints.
+// Any system can map them to another numbering scheme if it wishes.
+//
+// First, we wanted the default level to be Info, Since Levels are ints, Info is
+// the default value for int, zero.
+//
+// Second, we wanted to make it easy to use levels to specify logger verbosity.
+// Since a larger level means a more severe event, a logger that accepts events
+// with smaller (or more negative) level means a more verbose logger. Logger
+// verbosity is thus the negation of event severity, and the default verbosity
+// of 0 accepts all events at least as severe as INFO.
+//
+// Third, we wanted some room between levels to accommodate schemes with named
+// levels between ours. For example, Google Cloud Logging defines a Notice level
+// between Info and Warn. Since there are only a few of these intermediate
+// levels, the gap between the numbers need not be large. Our gap of 4 matches
+// OpenTelemetry's mapping. Subtracting 9 from an OpenTelemetry level in the
+// DEBUG, INFO, WARN and ERROR ranges converts it to the corresponding slog
+// Level range. OpenTelemetry also has the names TRACE and FATAL, which slog
+// does not. But those OpenTelemetry levels can still be represented as slog
+// Levels by using the appropriate integers.
+//
+// Names for common levels.
+const (
+ LevelDebug Level = slog.LevelDebug
+ LevelInfo Level = slog.LevelInfo
+ LevelWarn Level = slog.LevelWarn
+ LevelError Level = slog.LevelError
+)
+
+// A LevelVar is a Level variable, to allow a Handler level to change
+// dynamically.
+// It implements Leveler as well as a Set method,
+// and it is safe for use by multiple goroutines.
+// The zero LevelVar corresponds to LevelInfo.
+type LevelVar = slog.LevelVar
+
+// A Leveler provides a Level value.
+//
+// As Level itself implements Leveler, clients typically supply
+// a Level value wherever a Leveler is needed, such as in HandlerOptions.
+// Clients who need to vary the level dynamically can provide a more complex
+// Leveler implementation such as *LevelVar.
+type Leveler = slog.Leveler
diff --git a/test/tools/vendor/github.com/sagikazarmark/slog-shim/level_120.go b/test/tools/vendor/github.com/sagikazarmark/slog-shim/level_120.go
new file mode 100644
index 00000000000..d3feb942038
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/slog-shim/level_120.go
@@ -0,0 +1,61 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.21
+
+package slog
+
+import (
+ "golang.org/x/exp/slog"
+)
+
+// A Level is the importance or severity of a log event.
+// The higher the level, the more important or severe the event.
+type Level = slog.Level
+
+// Level numbers are inherently arbitrary,
+// but we picked them to satisfy three constraints.
+// Any system can map them to another numbering scheme if it wishes.
+//
+// First, we wanted the default level to be Info, Since Levels are ints, Info is
+// the default value for int, zero.
+//
+// Second, we wanted to make it easy to use levels to specify logger verbosity.
+// Since a larger level means a more severe event, a logger that accepts events
+// with smaller (or more negative) level means a more verbose logger. Logger
+// verbosity is thus the negation of event severity, and the default verbosity
+// of 0 accepts all events at least as severe as INFO.
+//
+// Third, we wanted some room between levels to accommodate schemes with named
+// levels between ours. For example, Google Cloud Logging defines a Notice level
+// between Info and Warn. Since there are only a few of these intermediate
+// levels, the gap between the numbers need not be large. Our gap of 4 matches
+// OpenTelemetry's mapping. Subtracting 9 from an OpenTelemetry level in the
+// DEBUG, INFO, WARN and ERROR ranges converts it to the corresponding slog
+// Level range. OpenTelemetry also has the names TRACE and FATAL, which slog
+// does not. But those OpenTelemetry levels can still be represented as slog
+// Levels by using the appropriate integers.
+//
+// Names for common levels.
+const (
+ LevelDebug Level = slog.LevelDebug
+ LevelInfo Level = slog.LevelInfo
+ LevelWarn Level = slog.LevelWarn
+ LevelError Level = slog.LevelError
+)
+
+// A LevelVar is a Level variable, to allow a Handler level to change
+// dynamically.
+// It implements Leveler as well as a Set method,
+// and it is safe for use by multiple goroutines.
+// The zero LevelVar corresponds to LevelInfo.
+type LevelVar = slog.LevelVar
+
+// A Leveler provides a Level value.
+//
+// As Level itself implements Leveler, clients typically supply
+// a Level value wherever a Leveler is needed, such as in HandlerOptions.
+// Clients who need to vary the level dynamically can provide a more complex
+// Leveler implementation such as *LevelVar.
+type Leveler = slog.Leveler
diff --git a/test/tools/vendor/github.com/sagikazarmark/slog-shim/logger.go b/test/tools/vendor/github.com/sagikazarmark/slog-shim/logger.go
new file mode 100644
index 00000000000..e80036bec5e
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/slog-shim/logger.go
@@ -0,0 +1,98 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package slog
+
+import (
+ "context"
+ "log"
+ "log/slog"
+)
+
+// Default returns the default Logger.
+func Default() *Logger { return slog.Default() }
+
+// SetDefault makes l the default Logger.
+// After this call, output from the log package's default Logger
+// (as with [log.Print], etc.) will be logged at LevelInfo using l's Handler.
+func SetDefault(l *Logger) {
+ slog.SetDefault(l)
+}
+
+// A Logger records structured information about each call to its
+// Log, Debug, Info, Warn, and Error methods.
+// For each call, it creates a Record and passes it to a Handler.
+//
+// To create a new Logger, call [New] or a Logger method
+// that begins "With".
+type Logger = slog.Logger
+
+// New creates a new Logger with the given non-nil Handler.
+func New(h Handler) *Logger {
+ return slog.New(h)
+}
+
+// With calls Logger.With on the default logger.
+func With(args ...any) *Logger {
+ return slog.With(args...)
+}
+
+// NewLogLogger returns a new log.Logger such that each call to its Output method
+// dispatches a Record to the specified handler. The logger acts as a bridge from
+// the older log API to newer structured logging handlers.
+func NewLogLogger(h Handler, level Level) *log.Logger {
+ return slog.NewLogLogger(h, level)
+}
+
+// Debug calls Logger.Debug on the default logger.
+func Debug(msg string, args ...any) {
+ slog.Debug(msg, args...)
+}
+
+// DebugContext calls Logger.DebugContext on the default logger.
+func DebugContext(ctx context.Context, msg string, args ...any) {
+ slog.DebugContext(ctx, msg, args...)
+}
+
+// Info calls Logger.Info on the default logger.
+func Info(msg string, args ...any) {
+ slog.Info(msg, args...)
+}
+
+// InfoContext calls Logger.InfoContext on the default logger.
+func InfoContext(ctx context.Context, msg string, args ...any) {
+ slog.InfoContext(ctx, msg, args...)
+}
+
+// Warn calls Logger.Warn on the default logger.
+func Warn(msg string, args ...any) {
+ slog.Warn(msg, args...)
+}
+
+// WarnContext calls Logger.WarnContext on the default logger.
+func WarnContext(ctx context.Context, msg string, args ...any) {
+ slog.WarnContext(ctx, msg, args...)
+}
+
+// Error calls Logger.Error on the default logger.
+func Error(msg string, args ...any) {
+ slog.Error(msg, args...)
+}
+
+// ErrorContext calls Logger.ErrorContext on the default logger.
+func ErrorContext(ctx context.Context, msg string, args ...any) {
+ slog.ErrorContext(ctx, msg, args...)
+}
+
+// Log calls Logger.Log on the default logger.
+func Log(ctx context.Context, level Level, msg string, args ...any) {
+ slog.Log(ctx, level, msg, args...)
+}
+
+// LogAttrs calls Logger.LogAttrs on the default logger.
+func LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) {
+ slog.LogAttrs(ctx, level, msg, attrs...)
+}
diff --git a/test/tools/vendor/github.com/sagikazarmark/slog-shim/logger_120.go b/test/tools/vendor/github.com/sagikazarmark/slog-shim/logger_120.go
new file mode 100644
index 00000000000..97ebdd5e1c0
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/slog-shim/logger_120.go
@@ -0,0 +1,99 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.21
+
+package slog
+
+import (
+ "context"
+ "log"
+
+ "golang.org/x/exp/slog"
+)
+
+// Default returns the default Logger.
+func Default() *Logger { return slog.Default() }
+
+// SetDefault makes l the default Logger.
+// After this call, output from the log package's default Logger
+// (as with [log.Print], etc.) will be logged at LevelInfo using l's Handler.
+func SetDefault(l *Logger) {
+ slog.SetDefault(l)
+}
+
+// A Logger records structured information about each call to its
+// Log, Debug, Info, Warn, and Error methods.
+// For each call, it creates a Record and passes it to a Handler.
+//
+// To create a new Logger, call [New] or a Logger method
+// that begins "With".
+type Logger = slog.Logger
+
+// New creates a new Logger with the given non-nil Handler.
+func New(h Handler) *Logger {
+ return slog.New(h)
+}
+
+// With calls Logger.With on the default logger.
+func With(args ...any) *Logger {
+ return slog.With(args...)
+}
+
+// NewLogLogger returns a new log.Logger such that each call to its Output method
+// dispatches a Record to the specified handler. The logger acts as a bridge from
+// the older log API to newer structured logging handlers.
+func NewLogLogger(h Handler, level Level) *log.Logger {
+ return slog.NewLogLogger(h, level)
+}
+
+// Debug calls Logger.Debug on the default logger.
+func Debug(msg string, args ...any) {
+ slog.Debug(msg, args...)
+}
+
+// DebugContext calls Logger.DebugContext on the default logger.
+func DebugContext(ctx context.Context, msg string, args ...any) {
+ slog.DebugContext(ctx, msg, args...)
+}
+
+// Info calls Logger.Info on the default logger.
+func Info(msg string, args ...any) {
+ slog.Info(msg, args...)
+}
+
+// InfoContext calls Logger.InfoContext on the default logger.
+func InfoContext(ctx context.Context, msg string, args ...any) {
+ slog.InfoContext(ctx, msg, args...)
+}
+
+// Warn calls Logger.Warn on the default logger.
+func Warn(msg string, args ...any) {
+ slog.Warn(msg, args...)
+}
+
+// WarnContext calls Logger.WarnContext on the default logger.
+func WarnContext(ctx context.Context, msg string, args ...any) {
+ slog.WarnContext(ctx, msg, args...)
+}
+
+// Error calls Logger.Error on the default logger.
+func Error(msg string, args ...any) {
+ slog.Error(msg, args...)
+}
+
+// ErrorContext calls Logger.ErrorContext on the default logger.
+func ErrorContext(ctx context.Context, msg string, args ...any) {
+ slog.ErrorContext(ctx, msg, args...)
+}
+
+// Log calls Logger.Log on the default logger.
+func Log(ctx context.Context, level Level, msg string, args ...any) {
+ slog.Log(ctx, level, msg, args...)
+}
+
+// LogAttrs calls Logger.LogAttrs on the default logger.
+func LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) {
+ slog.LogAttrs(ctx, level, msg, attrs...)
+}
diff --git a/test/tools/vendor/github.com/sagikazarmark/slog-shim/record.go b/test/tools/vendor/github.com/sagikazarmark/slog-shim/record.go
new file mode 100644
index 00000000000..85ad1f78420
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/slog-shim/record.go
@@ -0,0 +1,31 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package slog
+
+import (
+ "log/slog"
+ "time"
+)
+
+// A Record holds information about a log event.
+// Copies of a Record share state.
+// Do not modify a Record after handing out a copy to it.
+// Call [NewRecord] to create a new Record.
+// Use [Record.Clone] to create a copy with no shared state.
+type Record = slog.Record
+
+// NewRecord creates a Record from the given arguments.
+// Use [Record.AddAttrs] to add attributes to the Record.
+//
+// NewRecord is intended for logging APIs that want to support a [Handler] as
+// a backend.
+func NewRecord(t time.Time, level Level, msg string, pc uintptr) Record {
+ return slog.NewRecord(t, level, msg, pc)
+}
+
+// Source describes the location of a line of source code.
+type Source = slog.Source
diff --git a/test/tools/vendor/github.com/sagikazarmark/slog-shim/record_120.go b/test/tools/vendor/github.com/sagikazarmark/slog-shim/record_120.go
new file mode 100644
index 00000000000..c2eaf4e796c
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/slog-shim/record_120.go
@@ -0,0 +1,32 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.21
+
+package slog
+
+import (
+ "time"
+
+ "golang.org/x/exp/slog"
+)
+
+// A Record holds information about a log event.
+// Copies of a Record share state.
+// Do not modify a Record after handing out a copy to it.
+// Call [NewRecord] to create a new Record.
+// Use [Record.Clone] to create a copy with no shared state.
+type Record = slog.Record
+
+// NewRecord creates a Record from the given arguments.
+// Use [Record.AddAttrs] to add attributes to the Record.
+//
+// NewRecord is intended for logging APIs that want to support a [Handler] as
+// a backend.
+func NewRecord(t time.Time, level Level, msg string, pc uintptr) Record {
+ return slog.NewRecord(t, level, msg, pc)
+}
+
+// Source describes the location of a line of source code.
+type Source = slog.Source
diff --git a/test/tools/vendor/github.com/sagikazarmark/slog-shim/text_handler.go b/test/tools/vendor/github.com/sagikazarmark/slog-shim/text_handler.go
new file mode 100644
index 00000000000..45f6cfcba5c
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/slog-shim/text_handler.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package slog
+
+import (
+ "io"
+ "log/slog"
+)
+
+// TextHandler is a Handler that writes Records to an io.Writer as a
+// sequence of key=value pairs separated by spaces and followed by a newline.
+type TextHandler = slog.TextHandler
+
+// NewTextHandler creates a TextHandler that writes to w,
+// using the given options.
+// If opts is nil, the default options are used.
+func NewTextHandler(w io.Writer, opts *HandlerOptions) *TextHandler {
+ return slog.NewTextHandler(w, opts)
+}
diff --git a/test/tools/vendor/github.com/sagikazarmark/slog-shim/text_handler_120.go b/test/tools/vendor/github.com/sagikazarmark/slog-shim/text_handler_120.go
new file mode 100644
index 00000000000..a69d63ccea6
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/slog-shim/text_handler_120.go
@@ -0,0 +1,24 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.21
+
+package slog
+
+import (
+ "io"
+
+ "golang.org/x/exp/slog"
+)
+
+// TextHandler is a Handler that writes Records to an io.Writer as a
+// sequence of key=value pairs separated by spaces and followed by a newline.
+type TextHandler = slog.TextHandler
+
+// NewTextHandler creates a TextHandler that writes to w,
+// using the given options.
+// If opts is nil, the default options are used.
+func NewTextHandler(w io.Writer, opts *HandlerOptions) *TextHandler {
+ return slog.NewTextHandler(w, opts)
+}
diff --git a/test/tools/vendor/github.com/sagikazarmark/slog-shim/value.go b/test/tools/vendor/github.com/sagikazarmark/slog-shim/value.go
new file mode 100644
index 00000000000..61173eb9462
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/slog-shim/value.go
@@ -0,0 +1,109 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package slog
+
+import (
+ "log/slog"
+ "time"
+)
+
+// A Value can represent any Go value, but unlike type any,
+// it can represent most small values without an allocation.
+// The zero Value corresponds to nil.
+type Value = slog.Value
+
+// Kind is the kind of a Value.
+type Kind = slog.Kind
+
+// The following list is sorted alphabetically, but it's also important that
+// KindAny is 0 so that a zero Value represents nil.
+const (
+ KindAny = slog.KindAny
+ KindBool = slog.KindBool
+ KindDuration = slog.KindDuration
+ KindFloat64 = slog.KindFloat64
+ KindInt64 = slog.KindInt64
+ KindString = slog.KindString
+ KindTime = slog.KindTime
+ KindUint64 = slog.KindUint64
+ KindGroup = slog.KindGroup
+ KindLogValuer = slog.KindLogValuer
+)
+
+//////////////// Constructors
+
+// StringValue returns a new Value for a string.
+func StringValue(value string) Value {
+ return slog.StringValue(value)
+}
+
+// IntValue returns a Value for an int.
+func IntValue(v int) Value {
+ return slog.IntValue(v)
+}
+
+// Int64Value returns a Value for an int64.
+func Int64Value(v int64) Value {
+ return slog.Int64Value(v)
+}
+
+// Uint64Value returns a Value for a uint64.
+func Uint64Value(v uint64) Value {
+ return slog.Uint64Value(v)
+}
+
+// Float64Value returns a Value for a floating-point number.
+func Float64Value(v float64) Value {
+ return slog.Float64Value(v)
+}
+
+// BoolValue returns a Value for a bool.
+func BoolValue(v bool) Value {
+ return slog.BoolValue(v)
+}
+
+// TimeValue returns a Value for a time.Time.
+// It discards the monotonic portion.
+func TimeValue(v time.Time) Value {
+ return slog.TimeValue(v)
+}
+
+// DurationValue returns a Value for a time.Duration.
+func DurationValue(v time.Duration) Value {
+ return slog.DurationValue(v)
+}
+
+// GroupValue returns a new Value for a list of Attrs.
+// The caller must not subsequently mutate the argument slice.
+func GroupValue(as ...Attr) Value {
+ return slog.GroupValue(as...)
+}
+
+// AnyValue returns a Value for the supplied value.
+//
+// If the supplied value is of type Value, it is returned
+// unmodified.
+//
+// Given a value of one of Go's predeclared string, bool, or
+// (non-complex) numeric types, AnyValue returns a Value of kind
+// String, Bool, Uint64, Int64, or Float64. The width of the
+// original numeric type is not preserved.
+//
+// Given a time.Time or time.Duration value, AnyValue returns a Value of kind
+// KindTime or KindDuration. The monotonic time is not preserved.
+//
+// For nil, or values of all other types, including named types whose
+// underlying type is numeric, AnyValue returns a value of kind KindAny.
+func AnyValue(v any) Value {
+ return slog.AnyValue(v)
+}
+
+// A LogValuer is any Go value that can convert itself into a Value for logging.
+//
+// This mechanism may be used to defer expensive operations until they are
+// needed, or to expand a single value into a sequence of components.
+type LogValuer = slog.LogValuer
diff --git a/test/tools/vendor/github.com/sagikazarmark/slog-shim/value_120.go b/test/tools/vendor/github.com/sagikazarmark/slog-shim/value_120.go
new file mode 100644
index 00000000000..0f9f871eee0
--- /dev/null
+++ b/test/tools/vendor/github.com/sagikazarmark/slog-shim/value_120.go
@@ -0,0 +1,110 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.21
+
+package slog
+
+import (
+ "time"
+
+ "golang.org/x/exp/slog"
+)
+
+// A Value can represent any Go value, but unlike type any,
+// it can represent most small values without an allocation.
+// The zero Value corresponds to nil.
+type Value = slog.Value
+
+// Kind is the kind of a Value.
+type Kind = slog.Kind
+
+// The following list is sorted alphabetically, but it's also important that
+// KindAny is 0 so that a zero Value represents nil.
+const (
+ KindAny = slog.KindAny
+ KindBool = slog.KindBool
+ KindDuration = slog.KindDuration
+ KindFloat64 = slog.KindFloat64
+ KindInt64 = slog.KindInt64
+ KindString = slog.KindString
+ KindTime = slog.KindTime
+ KindUint64 = slog.KindUint64
+ KindGroup = slog.KindGroup
+ KindLogValuer = slog.KindLogValuer
+)
+
+//////////////// Constructors
+
+// StringValue returns a new Value for a string.
+func StringValue(value string) Value {
+ return slog.StringValue(value)
+}
+
+// IntValue returns a Value for an int.
+func IntValue(v int) Value {
+ return slog.IntValue(v)
+}
+
+// Int64Value returns a Value for an int64.
+func Int64Value(v int64) Value {
+ return slog.Int64Value(v)
+}
+
+// Uint64Value returns a Value for a uint64.
+func Uint64Value(v uint64) Value {
+ return slog.Uint64Value(v)
+}
+
+// Float64Value returns a Value for a floating-point number.
+func Float64Value(v float64) Value {
+ return slog.Float64Value(v)
+}
+
+// BoolValue returns a Value for a bool.
+func BoolValue(v bool) Value {
+ return slog.BoolValue(v)
+}
+
+// TimeValue returns a Value for a time.Time.
+// It discards the monotonic portion.
+func TimeValue(v time.Time) Value {
+ return slog.TimeValue(v)
+}
+
+// DurationValue returns a Value for a time.Duration.
+func DurationValue(v time.Duration) Value {
+ return slog.DurationValue(v)
+}
+
+// GroupValue returns a new Value for a list of Attrs.
+// The caller must not subsequently mutate the argument slice.
+func GroupValue(as ...Attr) Value {
+ return slog.GroupValue(as...)
+}
+
+// AnyValue returns a Value for the supplied value.
+//
+// If the supplied value is of type Value, it is returned
+// unmodified.
+//
+// Given a value of one of Go's predeclared string, bool, or
+// (non-complex) numeric types, AnyValue returns a Value of kind
+// String, Bool, Uint64, Int64, or Float64. The width of the
+// original numeric type is not preserved.
+//
+// Given a time.Time or time.Duration value, AnyValue returns a Value of kind
+// KindTime or KindDuration. The monotonic time is not preserved.
+//
+// For nil, or values of all other types, including named types whose
+// underlying type is numeric, AnyValue returns a value of kind KindAny.
+func AnyValue(v any) Value {
+ return slog.AnyValue(v)
+}
+
+// A LogValuer is any Go value that can convert itself into a Value for logging.
+//
+// This mechanism may be used to defer expensive operations until they are
+// needed, or to expand a single value into a sequence of components.
+type LogValuer = slog.LogValuer
diff --git a/test/tools/vendor/github.com/shopspring/decimal/.gitignore b/test/tools/vendor/github.com/shopspring/decimal/.gitignore
index 8a43ce9d7b6..ff36b987f07 100644
--- a/test/tools/vendor/github.com/shopspring/decimal/.gitignore
+++ b/test/tools/vendor/github.com/shopspring/decimal/.gitignore
@@ -4,3 +4,6 @@
# IntelliJ
.idea/
*.iml
+
+# VS code
+*.code-workspace
diff --git a/test/tools/vendor/github.com/shopspring/decimal/.travis.yml b/test/tools/vendor/github.com/shopspring/decimal/.travis.yml
index 55d42b289d0..6326d40f0e9 100644
--- a/test/tools/vendor/github.com/shopspring/decimal/.travis.yml
+++ b/test/tools/vendor/github.com/shopspring/decimal/.travis.yml
@@ -1,9 +1,15 @@
language: go
+arch:
+ - amd64
+ - ppc64le
+
go:
- 1.7.x
- - 1.12.x
- - 1.13.x
+ - 1.14.x
+ - 1.15.x
+ - 1.16.x
+ - 1.17.x
- tip
install:
diff --git a/test/tools/vendor/github.com/shopspring/decimal/CHANGELOG.md b/test/tools/vendor/github.com/shopspring/decimal/CHANGELOG.md
index 01ba02feb2c..aea61154b8c 100644
--- a/test/tools/vendor/github.com/shopspring/decimal/CHANGELOG.md
+++ b/test/tools/vendor/github.com/shopspring/decimal/CHANGELOG.md
@@ -1,4 +1,34 @@
-## Decimal v1.2.0
+## Decimal v1.3.1
+
+#### ENHANCEMENTS
+- Reduce memory allocation in case of initialization from big.Int [#252](https://github.com/shopspring/decimal/pull/252)
+
+#### BUGFIXES
+- Fix binary marshalling of decimal zero value [#253](https://github.com/shopspring/decimal/pull/253)
+
+## Decimal v1.3.0
+
+#### FEATURES
+- Add NewFromFormattedString initializer [#184](https://github.com/shopspring/decimal/pull/184)
+- Add NewNullDecimal initializer [#234](https://github.com/shopspring/decimal/pull/234)
+- Add implementation of natural exponent function (Taylor, Hull-Abraham) [#229](https://github.com/shopspring/decimal/pull/229)
+- Add RoundUp, RoundDown, RoundCeil, RoundFloor methods [#196](https://github.com/shopspring/decimal/pull/196) [#202](https://github.com/shopspring/decimal/pull/202) [#220](https://github.com/shopspring/decimal/pull/220)
+- Add XML support for NullDecimal [#192](https://github.com/shopspring/decimal/pull/192)
+- Add IsInteger method [#179](https://github.com/shopspring/decimal/pull/179)
+- Add Copy helper method [#123](https://github.com/shopspring/decimal/pull/123)
+- Add InexactFloat64 helper method [#205](https://github.com/shopspring/decimal/pull/205)
+- Add CoefficientInt64 helper method [#244](https://github.com/shopspring/decimal/pull/244)
+
+#### ENHANCEMENTS
+- Performance optimization of NewFromString init method [#198](https://github.com/shopspring/decimal/pull/198)
+- Performance optimization of Abs and Round methods [#240](https://github.com/shopspring/decimal/pull/240)
+- Additional tests (CI) for ppc64le architecture [#188](https://github.com/shopspring/decimal/pull/188)
+
+#### BUGFIXES
+- Fix rounding in FormatFloat fallback path (roundShortest method, fix taken from Go main repository) [#161](https://github.com/shopspring/decimal/pull/161)
+- Add slice range checks to UnmarshalBinary method [#232](https://github.com/shopspring/decimal/pull/232)
+
+## Decimal v1.2.0
#### BREAKING
- Drop support for Go version older than 1.7 [#172](https://github.com/shopspring/decimal/pull/172)
diff --git a/test/tools/vendor/github.com/shopspring/decimal/README.md b/test/tools/vendor/github.com/shopspring/decimal/README.md
index b70f9015935..2e35df068ea 100644
--- a/test/tools/vendor/github.com/shopspring/decimal/README.md
+++ b/test/tools/vendor/github.com/shopspring/decimal/README.md
@@ -1,6 +1,6 @@
# decimal
-[](https://travis-ci.org/shopspring/decimal) [](https://godoc.org/github.com/shopspring/decimal) [](https://goreportcard.com/report/github.com/shopspring/decimal)
+[](https://app.travis-ci.com/shopspring/decimal) [](https://godoc.org/github.com/shopspring/decimal) [](https://goreportcard.com/report/github.com/shopspring/decimal)
Arbitrary-precision fixed-point decimal numbers in go.
diff --git a/test/tools/vendor/github.com/shopspring/decimal/decimal.go b/test/tools/vendor/github.com/shopspring/decimal/decimal.go
index 801c1a0457a..84405ec1cf0 100644
--- a/test/tools/vendor/github.com/shopspring/decimal/decimal.go
+++ b/test/tools/vendor/github.com/shopspring/decimal/decimal.go
@@ -22,6 +22,7 @@ import (
"fmt"
"math"
"math/big"
+ "regexp"
"strconv"
"strings"
)
@@ -51,6 +52,10 @@ var DivisionPrecision = 16
// silently lose precision.
var MarshalJSONWithoutQuotes = false
+// ExpMaxIterations specifies the maximum number of iterations needed to calculate
+// precise natural exponent value using ExpHullAbrham method.
+var ExpMaxIterations = 1000
+
// Zero constant, to make computations faster.
// Zero should never be compared with == or != directly, please use decimal.Equal or decimal.Cmp instead.
var Zero = New(0, 1)
@@ -63,6 +68,8 @@ var fiveInt = big.NewInt(5)
var tenInt = big.NewInt(10)
var twentyInt = big.NewInt(20)
+var factorials = []Decimal{New(1, 0)}
+
// Decimal represents a fixed-point decimal. It is immutable.
// number = value * 10 ^ exp
type Decimal struct {
@@ -113,7 +120,7 @@ func NewFromInt32(value int32) Decimal {
// NewFromBigInt returns a new Decimal from a big.Int, value * 10 ^ exp
func NewFromBigInt(value *big.Int, exp int32) Decimal {
return Decimal{
- value: big.NewInt(0).Set(value),
+ value: new(big.Int).Set(value),
exp: exp,
}
}
@@ -146,23 +153,45 @@ func NewFromString(value string) (Decimal, error) {
exp = expInt
}
- parts := strings.Split(value, ".")
- if len(parts) == 1 {
+ pIndex := -1
+ vLen := len(value)
+ for i := 0; i < vLen; i++ {
+ if value[i] == '.' {
+ if pIndex > -1 {
+ return Decimal{}, fmt.Errorf("can't convert %s to decimal: too many .s", value)
+ }
+ pIndex = i
+ }
+ }
+
+ if pIndex == -1 {
// There is no decimal point, we can just parse the original string as
// an int
intString = value
- } else if len(parts) == 2 {
- intString = parts[0] + parts[1]
- expInt := -len(parts[1])
- exp += int64(expInt)
} else {
- return Decimal{}, fmt.Errorf("can't convert %s to decimal: too many .s", value)
+ if pIndex+1 < vLen {
+ intString = value[:pIndex] + value[pIndex+1:]
+ } else {
+ intString = value[:pIndex]
+ }
+ expInt := -len(value[pIndex+1:])
+ exp += int64(expInt)
}
- dValue := new(big.Int)
- _, ok := dValue.SetString(intString, 10)
- if !ok {
- return Decimal{}, fmt.Errorf("can't convert %s to decimal", value)
+ var dValue *big.Int
+ // strconv.ParseInt is faster than new(big.Int).SetString so this is just a shortcut for strings we know won't overflow
+ if len(intString) <= 18 {
+ parsed64, err := strconv.ParseInt(intString, 10, 64)
+ if err != nil {
+ return Decimal{}, fmt.Errorf("can't convert %s to decimal", value)
+ }
+ dValue = big.NewInt(parsed64)
+ } else {
+ dValue = new(big.Int)
+ _, ok := dValue.SetString(intString, 10)
+ if !ok {
+ return Decimal{}, fmt.Errorf("can't convert %s to decimal", value)
+ }
}
if exp < math.MinInt32 || exp > math.MaxInt32 {
@@ -176,6 +205,30 @@ func NewFromString(value string) (Decimal, error) {
}, nil
}
+// NewFromFormattedString returns a new Decimal from a formatted string representation.
+// The second argument - replRegexp, is a regular expression that is used to find characters that should be
+// removed from given decimal string representation. All matched characters will be replaced with an empty string.
+//
+// Example:
+//
+// r := regexp.MustCompile("[$,]")
+// d1, err := NewFromFormattedString("$5,125.99", r)
+//
+// r2 := regexp.MustCompile("[_]")
+// d2, err := NewFromFormattedString("1_000_000", r2)
+//
+// r3 := regexp.MustCompile("[USD\\s]")
+// d3, err := NewFromFormattedString("5000 USD", r3)
+//
+func NewFromFormattedString(value string, replRegexp *regexp.Regexp) (Decimal, error) {
+ parsedValue := replRegexp.ReplaceAllString(value, "")
+ d, err := NewFromString(parsedValue)
+ if err != nil {
+ return Decimal{}, err
+ }
+ return d, nil
+}
+
// RequireFromString returns a new Decimal from a string representation
// or panics if NewFromString would have returned an error.
//
@@ -361,6 +414,15 @@ func NewFromFloatWithExponent(value float64, exp int32) Decimal {
}
}
+// Copy returns a copy of decimal with the same value and exponent, but a different pointer to value.
+func (d Decimal) Copy() Decimal {
+ d.ensureInitialized()
+ return Decimal{
+ value: &(*d.value),
+ exp: d.exp,
+ }
+}
+
// rescale returns a rescaled version of the decimal. Returned
// decimal may be less precise if the given exponent is bigger
// than the initial exponent of the Decimal.
@@ -410,6 +472,9 @@ func (d Decimal) rescale(exp int32) Decimal {
// Abs returns the absolute value of the decimal.
func (d Decimal) Abs() Decimal {
+ if !d.IsNegative() {
+ return d
+ }
d.ensureInitialized()
d2Value := new(big.Int).Abs(d.value)
return Decimal{
@@ -583,6 +648,207 @@ func (d Decimal) Pow(d2 Decimal) Decimal {
return temp.Mul(temp).Div(d)
}
+// ExpHullAbrham calculates the natural exponent of decimal (e to the power of d) using Hull-Abraham algorithm.
+// OverallPrecision argument specifies the overall precision of the result (integer part + decimal part).
+//
+// ExpHullAbrham is faster than ExpTaylor for small precision values, but it is much slower for large precision values.
+//
+// Example:
+//
+// NewFromFloat(26.1).ExpHullAbrham(2).String() // output: "220000000000"
+// NewFromFloat(26.1).ExpHullAbrham(20).String() // output: "216314672147.05767284"
+//
+func (d Decimal) ExpHullAbrham(overallPrecision uint32) (Decimal, error) {
+ // Algorithm based on Variable precision exponential function.
+ // ACM Transactions on Mathematical Software by T. E. Hull & A. Abrham.
+ if d.IsZero() {
+ return Decimal{oneInt, 0}, nil
+ }
+
+ currentPrecision := overallPrecision
+
+ // Algorithm does not work if currentPrecision * 23 < |x|.
+ // Precision is automatically increased in such cases, so the value can be calculated precisely.
+ // If newly calculated precision is higher than ExpMaxIterations the currentPrecision will not be changed.
+ f := d.Abs().InexactFloat64()
+ if ncp := f / 23; ncp > float64(currentPrecision) && ncp < float64(ExpMaxIterations) {
+ currentPrecision = uint32(math.Ceil(ncp))
+ }
+
+ // fail if abs(d) beyond an over/underflow threshold
+ overflowThreshold := New(23*int64(currentPrecision), 0)
+ if d.Abs().Cmp(overflowThreshold) > 0 {
+ return Decimal{}, fmt.Errorf("over/underflow threshold, exp(x) cannot be calculated precisely")
+ }
+
+ // Return 1 if abs(d) small enough; this also avoids later over/underflow
+ overflowThreshold2 := New(9, -int32(currentPrecision)-1)
+ if d.Abs().Cmp(overflowThreshold2) <= 0 {
+ return Decimal{oneInt, d.exp}, nil
+ }
+
+ // t is the smallest integer >= 0 such that the corresponding abs(d/k) < 1
+ t := d.exp + int32(d.NumDigits()) // Add d.NumDigits because the paper assumes that d.value [0.1, 1)
+
+ if t < 0 {
+ t = 0
+ }
+
+ k := New(1, t) // reduction factor
+ r := Decimal{new(big.Int).Set(d.value), d.exp - t} // reduced argument
+ p := int32(currentPrecision) + t + 2 // precision for calculating the sum
+
+	// Determine n, the number of terms for calculating sum
+	// use first Newton step (1.453p - 1.182) / log10(p/abs(r))
+ // for solving appropriate equation, along with directed
+ // roundings and simple rational bound for log10(p/abs(r))
+ rf := r.Abs().InexactFloat64()
+ pf := float64(p)
+ nf := math.Ceil((1.453*pf - 1.182) / math.Log10(pf/rf))
+ if nf > float64(ExpMaxIterations) || math.IsNaN(nf) {
+ return Decimal{}, fmt.Errorf("exact value cannot be calculated in <=ExpMaxIterations iterations")
+ }
+ n := int64(nf)
+
+ tmp := New(0, 0)
+ sum := New(1, 0)
+ one := New(1, 0)
+ for i := n - 1; i > 0; i-- {
+ tmp.value.SetInt64(i)
+ sum = sum.Mul(r.DivRound(tmp, p))
+ sum = sum.Add(one)
+ }
+
+ ki := k.IntPart()
+ res := New(1, 0)
+ for i := ki; i > 0; i-- {
+ res = res.Mul(sum)
+ }
+
+ resNumDigits := int32(res.NumDigits())
+
+ var roundDigits int32
+ if resNumDigits > abs(res.exp) {
+ roundDigits = int32(currentPrecision) - resNumDigits - res.exp
+ } else {
+ roundDigits = int32(currentPrecision)
+ }
+
+ res = res.Round(roundDigits)
+
+ return res, nil
+}
+
+// ExpTaylor calculates the natural exponent of decimal (e to the power of d) using Taylor series expansion.
+// Precision argument specifies how precise the result must be (number of digits after decimal point).
+// Negative precision is allowed.
+//
+// ExpTaylor is much faster for large precision values than ExpHullAbrham.
+//
+// Example:
+//
+//	d, err := NewFromFloat(26.1).ExpTaylor(2)
+//	d.String() // output: "216314672147.06"
+//
+//	d, err := NewFromFloat(26.1).ExpTaylor(20)
+//	d.String() // output: "216314672147.05767284062928674083"
+//
+//	d, err := NewFromFloat(26.1).ExpTaylor(-10)
+//	d.String() // output: "220000000000"
+//
+func (d Decimal) ExpTaylor(precision int32) (Decimal, error) {
+ // Note(mwoss): Implementation can be optimized by exclusively using big.Int API only
+ if d.IsZero() {
+ return Decimal{oneInt, 0}.Round(precision), nil
+ }
+
+ var epsilon Decimal
+ var divPrecision int32
+ if precision < 0 {
+ epsilon = New(1, -1)
+ divPrecision = 8
+ } else {
+ epsilon = New(1, -precision-1)
+ divPrecision = precision + 1
+ }
+
+ decAbs := d.Abs()
+ pow := d.Abs()
+ factorial := New(1, 0)
+
+ result := New(1, 0)
+
+ for i := int64(1); ; {
+ step := pow.DivRound(factorial, divPrecision)
+ result = result.Add(step)
+
+ // Stop Taylor series when current step is smaller than epsilon
+ if step.Cmp(epsilon) < 0 {
+ break
+ }
+
+ pow = pow.Mul(decAbs)
+
+ i++
+
+ // Calculate next factorial number or retrieve cached value
+ if len(factorials) >= int(i) && !factorials[i-1].IsZero() {
+ factorial = factorials[i-1]
+ } else {
+ // To avoid any race conditions, firstly the zero value is appended to a slice to create
+ // a spot for newly calculated factorial. After that, the zero value is replaced by calculated
+ // factorial using the index notation.
+ factorial = factorials[i-2].Mul(New(i, 0))
+ factorials = append(factorials, Zero)
+ factorials[i-1] = factorial
+ }
+ }
+
+ if d.Sign() < 0 {
+ result = New(1, 0).DivRound(result, precision+1)
+ }
+
+ result = result.Round(precision)
+ return result, nil
+}
+
+// NumDigits returns the number of digits of the decimal coefficient (d.Value)
+// Note: Current implementation is extremely slow for large decimals and/or decimals with large fractional part
+func (d Decimal) NumDigits() int {
+ // Note(mwoss): It can be optimized, unnecessary cast of big.Int to string
+ if d.IsNegative() {
+ return len(d.value.String()) - 1
+ }
+ return len(d.value.String())
+}
+
+// IsInteger returns true when decimal can be represented as an integer value, otherwise, it returns false.
+func (d Decimal) IsInteger() bool {
+ // The most typical case, all decimal with exponent higher or equal 0 can be represented as integer
+ if d.exp >= 0 {
+ return true
+ }
+ // When the exponent is negative we have to check every number after the decimal place
+ // If all of them are zeroes, we are sure that given decimal can be represented as an integer
+ var r big.Int
+ q := new(big.Int).Set(d.value)
+ for z := abs(d.exp); z > 0; z-- {
+ q.QuoRem(q, tenInt, &r)
+ if r.Cmp(zeroInt) != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// Abs calculates absolute value of any int32. Used for calculating absolute value of decimal's exponent.
+func abs(n int32) int32 {
+ if n < 0 {
+ return -n
+ }
+ return n
+}
+
// Cmp compares the numbers represented by d and d2 and returns:
//
// -1 if d < d2
@@ -679,12 +945,18 @@ func (d Decimal) Exponent() int32 {
return d.exp
}
-// Coefficient returns the coefficient of the decimal. It is scaled by 10^Exponent()
+// Coefficient returns the coefficient of the decimal. It is scaled by 10^Exponent()
func (d Decimal) Coefficient() *big.Int {
d.ensureInitialized()
- // we copy the coefficient so that mutating the result does not mutate the
- // Decimal.
- return big.NewInt(0).Set(d.value)
+ // we copy the coefficient so that mutating the result does not mutate the Decimal.
+ return new(big.Int).Set(d.value)
+}
+
+// CoefficientInt64 returns the coefficient of the decimal as int64. It is scaled by 10^Exponent()
+// If coefficient cannot be represented in an int64, the result will be undefined.
+func (d Decimal) CoefficientInt64() int64 {
+ d.ensureInitialized()
+ return d.value.Int64()
}
// IntPart returns the integer component of the decimal.
@@ -730,6 +1002,13 @@ func (d Decimal) Float64() (f float64, exact bool) {
return d.Rat().Float64()
}
+// InexactFloat64 returns the nearest float64 value for d.
+// It doesn't indicate if the returned value represents d exactly.
+func (d Decimal) InexactFloat64() float64 {
+ f, _ := d.Float64()
+ return f
+}
+
// String returns the string representation of the decimal
// with the fixed point.
//
@@ -798,6 +1077,9 @@ func (d Decimal) StringFixedCash(interval uint8) string {
// NewFromFloat(545).Round(-1).String() // output: "550"
//
func (d Decimal) Round(places int32) Decimal {
+ if d.exp == -places {
+ return d
+ }
// truncate to places + 1
ret := d.rescale(-places - 1)
@@ -818,6 +1100,107 @@ func (d Decimal) Round(places int32) Decimal {
return ret
}
+// RoundCeil rounds the decimal towards +infinity.
+//
+// Example:
+//
+// NewFromFloat(545).RoundCeil(-2).String() // output: "600"
+// NewFromFloat(500).RoundCeil(-2).String() // output: "500"
+// NewFromFloat(1.1001).RoundCeil(2).String() // output: "1.11"
+// NewFromFloat(-1.454).RoundCeil(1).String() // output: "-1.5"
+//
+func (d Decimal) RoundCeil(places int32) Decimal {
+ if d.exp >= -places {
+ return d
+ }
+
+ rescaled := d.rescale(-places)
+ if d.Equal(rescaled) {
+ return d
+ }
+
+ if d.value.Sign() > 0 {
+ rescaled.value.Add(rescaled.value, oneInt)
+ }
+
+ return rescaled
+}
+
+// RoundFloor rounds the decimal towards -infinity.
+//
+// Example:
+//
+// NewFromFloat(545).RoundFloor(-2).String() // output: "500"
+// NewFromFloat(-500).RoundFloor(-2).String() // output: "-500"
+// NewFromFloat(1.1001).RoundFloor(2).String() // output: "1.1"
+// NewFromFloat(-1.454).RoundFloor(1).String() // output: "-1.4"
+//
+func (d Decimal) RoundFloor(places int32) Decimal {
+ if d.exp >= -places {
+ return d
+ }
+
+ rescaled := d.rescale(-places)
+ if d.Equal(rescaled) {
+ return d
+ }
+
+ if d.value.Sign() < 0 {
+ rescaled.value.Sub(rescaled.value, oneInt)
+ }
+
+ return rescaled
+}
+
+// RoundUp rounds the decimal away from zero.
+//
+// Example:
+//
+// NewFromFloat(545).RoundUp(-2).String() // output: "600"
+// NewFromFloat(500).RoundUp(-2).String() // output: "500"
+// NewFromFloat(1.1001).RoundUp(2).String() // output: "1.11"
+// NewFromFloat(-1.454).RoundUp(1).String() // output: "-1.4"
+//
+func (d Decimal) RoundUp(places int32) Decimal {
+ if d.exp >= -places {
+ return d
+ }
+
+ rescaled := d.rescale(-places)
+ if d.Equal(rescaled) {
+ return d
+ }
+
+ if d.value.Sign() > 0 {
+ rescaled.value.Add(rescaled.value, oneInt)
+ } else if d.value.Sign() < 0 {
+ rescaled.value.Sub(rescaled.value, oneInt)
+ }
+
+ return rescaled
+}
+
+// RoundDown rounds the decimal towards zero.
+//
+// Example:
+//
+// NewFromFloat(545).RoundDown(-2).String() // output: "500"
+// NewFromFloat(-500).RoundDown(-2).String() // output: "-500"
+// NewFromFloat(1.1001).RoundDown(2).String() // output: "1.1"
+// NewFromFloat(-1.454).RoundDown(1).String() // output: "-1.5"
+//
+func (d Decimal) RoundDown(places int32) Decimal {
+ if d.exp >= -places {
+ return d
+ }
+
+ rescaled := d.rescale(-places)
+ if d.Equal(rescaled) {
+ return d
+ }
+ return rescaled
+}
+
// RoundBank rounds the decimal to places decimal places.
// If the final digit to round is equidistant from the nearest two integers the
// rounded value is taken as the even number
@@ -826,12 +1209,12 @@ func (d Decimal) Round(places int32) Decimal {
//
// Examples:
//
-// NewFromFloat(5.45).Round(1).String() // output: "5.4"
-// NewFromFloat(545).Round(-1).String() // output: "540"
-// NewFromFloat(5.46).Round(1).String() // output: "5.5"
-// NewFromFloat(546).Round(-1).String() // output: "550"
-// NewFromFloat(5.55).Round(1).String() // output: "5.6"
-// NewFromFloat(555).Round(-1).String() // output: "560"
+// NewFromFloat(5.45).RoundBank(1).String() // output: "5.4"
+// NewFromFloat(545).RoundBank(-1).String() // output: "540"
+// NewFromFloat(5.46).RoundBank(1).String() // output: "5.5"
+// NewFromFloat(546).RoundBank(-1).String() // output: "550"
+// NewFromFloat(5.55).RoundBank(1).String() // output: "5.6"
+// NewFromFloat(555).RoundBank(-1).String() // output: "560"
//
func (d Decimal) RoundBank(places int32) Decimal {
@@ -970,12 +1353,22 @@ func (d Decimal) MarshalJSON() ([]byte, error) {
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. As a string representation
// is already used when encoding to text, this method stores that string as []byte
func (d *Decimal) UnmarshalBinary(data []byte) error {
+ // Verify we have at least 4 bytes for the exponent. The GOB encoded value
+ // may be empty.
+ if len(data) < 4 {
+ return fmt.Errorf("error decoding binary %v: expected at least 4 bytes, got %d", data, len(data))
+ }
+
// Extract the exponent
d.exp = int32(binary.BigEndian.Uint32(data[:4]))
// Extract the value
d.value = new(big.Int)
- return d.value.GobDecode(data[4:])
+ if err := d.value.GobDecode(data[4:]); err != nil {
+ return fmt.Errorf("error decoding binary %v: %s", data, err)
+ }
+
+ return nil
}
// MarshalBinary implements the encoding.BinaryMarshaler interface.
@@ -1219,6 +1612,13 @@ type NullDecimal struct {
Valid bool
}
+func NewNullDecimal(d Decimal) NullDecimal {
+ return NullDecimal{
+ Decimal: d,
+ Valid: true,
+ }
+}
+
// Scan implements the sql.Scanner interface for database deserialization.
func (d *NullDecimal) Scan(value interface{}) error {
if value == nil {
@@ -1255,6 +1655,33 @@ func (d NullDecimal) MarshalJSON() ([]byte, error) {
return d.Decimal.MarshalJSON()
}
+// UnmarshalText implements the encoding.TextUnmarshaler interface for XML
+// deserialization
+func (d *NullDecimal) UnmarshalText(text []byte) error {
+ str := string(text)
+
+	// check for empty XML or XML without body e.g., <decimal></decimal>
+ if str == "" {
+ d.Valid = false
+ return nil
+ }
+ if err := d.Decimal.UnmarshalText(text); err != nil {
+ d.Valid = false
+ return err
+ }
+ d.Valid = true
+ return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface for XML
+// serialization.
+func (d NullDecimal) MarshalText() (text []byte, err error) {
+ if !d.Valid {
+ return []byte{}, nil
+ }
+ return d.Decimal.MarshalText()
+}
+
// Trig functions
// Atan returns the arctangent, in radians, of x.
diff --git a/test/tools/vendor/github.com/shopspring/decimal/rounding.go b/test/tools/vendor/github.com/shopspring/decimal/rounding.go
index 8008f55cb98..d4b0cd00795 100644
--- a/test/tools/vendor/github.com/shopspring/decimal/rounding.go
+++ b/test/tools/vendor/github.com/shopspring/decimal/rounding.go
@@ -80,39 +80,80 @@ func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) {
// would round to the original mantissa and not the neighbors.
inclusive := mant%2 == 0
+ // As we walk the digits we want to know whether rounding up would fall
+ // within the upper bound. This is tracked by upperdelta:
+ //
+ // If upperdelta == 0, the digits of d and upper are the same so far.
+ //
+ // If upperdelta == 1, we saw a difference of 1 between d and upper on a
+ // previous digit and subsequently only 9s for d and 0s for upper.
+ // (Thus rounding up may fall outside the bound, if it is exclusive.)
+ //
+ // If upperdelta == 2, then the difference is greater than 1
+ // and we know that rounding up falls within the bound.
+ var upperdelta uint8
+
// Now we can figure out the minimum number of digits required.
// Walk along until d has distinguished itself from upper and lower.
- for i := 0; i < d.nd; i++ {
+ for ui := 0; ; ui++ {
+ // lower, d, and upper may have the decimal points at different
+ // places. In this case upper is the longest, so we iterate from
+ // ui==0 and start li and mi at (possibly) -1.
+ mi := ui - upper.dp + d.dp
+ if mi >= d.nd {
+ break
+ }
+ li := ui - upper.dp + lower.dp
l := byte('0') // lower digit
- if i < lower.nd {
- l = lower.d[i]
+ if li >= 0 && li < lower.nd {
+ l = lower.d[li]
+ }
+ m := byte('0') // middle digit
+ if mi >= 0 {
+ m = d.d[mi]
}
- m := d.d[i] // middle digit
u := byte('0') // upper digit
- if i < upper.nd {
- u = upper.d[i]
+ if ui < upper.nd {
+ u = upper.d[ui]
}
// Okay to round down (truncate) if lower has a different digit
// or if lower is inclusive and is exactly the result of rounding
// down (i.e., and we have reached the final digit of lower).
- okdown := l != m || inclusive && i+1 == lower.nd
+ okdown := l != m || inclusive && li+1 == lower.nd
+ switch {
+ case upperdelta == 0 && m+1 < u:
+ // Example:
+ // m = 12345xxx
+ // u = 12347xxx
+ upperdelta = 2
+ case upperdelta == 0 && m != u:
+ // Example:
+ // m = 12345xxx
+ // u = 12346xxx
+ upperdelta = 1
+ case upperdelta == 1 && (m != '9' || u != '0'):
+ // Example:
+ // m = 1234598x
+ // u = 1234600x
+ upperdelta = 2
+ }
// Okay to round up if upper has a different digit and either upper
// is inclusive or upper is bigger than the result of rounding up.
- okup := m != u && (inclusive || m+1 < u || i+1 < upper.nd)
+ okup := upperdelta > 0 && (inclusive || upperdelta > 1 || ui+1 < upper.nd)
// If it's okay to do either, then round to the nearest one.
// If it's okay to do only one, do it.
switch {
case okdown && okup:
- d.Round(i + 1)
+ d.Round(mi + 1)
return
case okdown:
- d.RoundDown(i + 1)
+ d.RoundDown(mi + 1)
return
case okup:
- d.RoundUp(i + 1)
+ d.RoundUp(mi + 1)
return
}
}
diff --git a/test/tools/vendor/github.com/sourcegraph/conc/.golangci.yml b/test/tools/vendor/github.com/sourcegraph/conc/.golangci.yml
new file mode 100644
index 00000000000..ae65a760a92
--- /dev/null
+++ b/test/tools/vendor/github.com/sourcegraph/conc/.golangci.yml
@@ -0,0 +1,11 @@
+linters:
+ disable-all: true
+ enable:
+ - errcheck
+ - godot
+ - gosimple
+ - govet
+ - ineffassign
+ - staticcheck
+ - typecheck
+ - unused
diff --git a/test/tools/vendor/github.com/spf13/jwalterweatherman/LICENSE b/test/tools/vendor/github.com/sourcegraph/conc/LICENSE
similarity index 94%
rename from test/tools/vendor/github.com/spf13/jwalterweatherman/LICENSE
rename to test/tools/vendor/github.com/sourcegraph/conc/LICENSE
index 4527efb9c06..1081f4ef4a4 100644
--- a/test/tools/vendor/github.com/spf13/jwalterweatherman/LICENSE
+++ b/test/tools/vendor/github.com/sourcegraph/conc/LICENSE
@@ -1,6 +1,6 @@
-The MIT License (MIT)
+MIT License
-Copyright (c) 2014 Steve Francia
+Copyright (c) 2023 Sourcegraph
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -18,4 +18,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
\ No newline at end of file
+SOFTWARE.
diff --git a/test/tools/vendor/github.com/sourcegraph/conc/README.md b/test/tools/vendor/github.com/sourcegraph/conc/README.md
new file mode 100644
index 00000000000..1c87c3c9699
--- /dev/null
+++ b/test/tools/vendor/github.com/sourcegraph/conc/README.md
@@ -0,0 +1,464 @@
+
+
+# `conc`: better structured concurrency for go
+
+[](https://pkg.go.dev/github.com/sourcegraph/conc)
+[](https://sourcegraph.com/github.com/sourcegraph/conc)
+[](https://goreportcard.com/report/github.com/sourcegraph/conc)
+[](https://codecov.io/gh/sourcegraph/conc)
+[](https://discord.gg/bvXQXmtRjN)
+
+`conc` is your toolbelt for structured concurrency in go, making common tasks
+easier and safer.
+
+```sh
+go get github.com/sourcegraph/conc
+```
+
+# At a glance
+
+- Use [`conc.WaitGroup`](https://pkg.go.dev/github.com/sourcegraph/conc#WaitGroup) if you just want a safer version of `sync.WaitGroup`
+- Use [`pool.Pool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool) if you want a concurrency-limited task runner
+- Use [`pool.ResultPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ResultPool) if you want a concurrent task runner that collects task results
+- Use [`pool.(Result)?ErrorPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ErrorPool) if your tasks are fallible
+- Use [`pool.(Result)?ContextPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ContextPool) if your tasks should be canceled on failure
+- Use [`stream.Stream`](https://pkg.go.dev/github.com/sourcegraph/conc/stream#Stream) if you want to process an ordered stream of tasks in parallel with serial callbacks
+- Use [`iter.Map`](https://pkg.go.dev/github.com/sourcegraph/conc/iter#Map) if you want to concurrently map a slice
+- Use [`iter.ForEach`](https://pkg.go.dev/github.com/sourcegraph/conc/iter#ForEach) if you want to concurrently iterate over a slice
+- Use [`panics.Catcher`](https://pkg.go.dev/github.com/sourcegraph/conc/panics#Catcher) if you want to catch panics in your own goroutines
+
+All pools are created with
+[`pool.New()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#New)
+or
+[`pool.NewWithResults[T]()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#NewWithResults),
+then configured with methods:
+
+- [`p.WithMaxGoroutines()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.MaxGoroutines) configures the maximum number of goroutines in the pool
+- [`p.WithErrors()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.WithErrors) configures the pool to run tasks that return errors
+- [`p.WithContext(ctx)`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.WithContext) configures the pool to run tasks that should be canceled on first error
+- [`p.WithFirstError()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ErrorPool.WithFirstError) configures error pools to only keep the first returned error rather than an aggregated error
+- [`p.WithCollectErrored()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ResultContextPool.WithCollectErrored) configures result pools to collect results even when the task errored
+
+# Goals
+
+The main goals of the package are:
+1) Make it harder to leak goroutines
+2) Handle panics gracefully
+3) Make concurrent code easier to read
+
+## Goal #1: Make it harder to leak goroutines
+
+A common pain point when working with goroutines is cleaning them up. It's
+really easy to fire off a `go` statement and fail to properly wait for it to
+complete.
+
+`conc` takes the opinionated stance that all concurrency should be scoped.
+That is, goroutines should have an owner and that owner should always
+ensure that its owned goroutines exit properly.
+
+In `conc`, the owner of a goroutine is always a `conc.WaitGroup`. Goroutines
+are spawned in a `WaitGroup` with `(*WaitGroup).Go()`, and
+`(*WaitGroup).Wait()` should always be called before the `WaitGroup` goes out
+of scope.
+
+In some cases, you might want a spawned goroutine to outlast the scope of the
+caller. In that case, you could pass a `WaitGroup` into the spawning function.
+
+```go
+func main() {
+ var wg conc.WaitGroup
+ defer wg.Wait()
+
+ startTheThing(&wg)
+}
+
+func startTheThing(wg *conc.WaitGroup) {
+ wg.Go(func() { ... })
+}
+```
+
+For some more discussion on why scoped concurrency is nice, check out [this
+blog
+post](https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/).
+
+## Goal #2: Handle panics gracefully
+
+A frequent problem with goroutines in long-running applications is handling
+panics. A goroutine spawned without a panic handler will crash the whole process
+on panic. This is usually undesirable.
+
+However, if you do add a panic handler to a goroutine, what do you do with the
+panic once you catch it? Some options:
+1) Ignore it
+2) Log it
+3) Turn it into an error and return that to the goroutine spawner
+4) Propagate the panic to the goroutine spawner
+
+Ignoring panics is a bad idea since panics usually mean there is actually
+something wrong and someone should fix it.
+
+Just logging panics isn't great either because then there is no indication to the spawner
+that something bad happened, and it might just continue on as normal even though your
+program is in a really bad state.
+
+Both (3) and (4) are reasonable options, but both require the goroutine to have
+an owner that can actually receive the message that something went wrong. This
+is generally not true with a goroutine spawned with `go`, but in the `conc`
+package, all goroutines have an owner that must collect the spawned goroutine.
+In the conc package, any call to `Wait()` will panic if any of the spawned goroutines
+panicked. Additionally, it decorates the panic value with a stacktrace from the child
+goroutine so that you don't lose information about what caused the panic.
+
+Doing this all correctly every time you spawn something with `go` is not
+trivial and it requires a lot of boilerplate that makes the important parts of
+the code more difficult to read, so `conc` does this for you.
+
+
+
+```go
+func main() {
+ var wg conc.WaitGroup
+ wg.Go(doSomethingThatMightPanic)
+ // panics with a nice stacktrace
+ wg.Wait()
+}
+```
+
+
+
+
+## Goal #3: Make concurrent code easier to read
+
+Doing concurrency correctly is difficult. Doing it in a way that doesn't
+obfuscate what the code is actually doing is more difficult. The `conc` package
+attempts to make common operations easier by abstracting as much boilerplate
+complexity as possible.
+
+Want to run a set of concurrent tasks with a bounded set of goroutines? Use
+`pool.New()`. Want to process an ordered stream of results concurrently, but
+still maintain order? Try `stream.New()`. What about a concurrent map over
+a slice? Take a peek at `iter.Map()`.
+
+Browse some examples below for some comparisons with doing these by hand.
+
+# Examples
+
+Each of these examples forgoes propagating panics for simplicity. To see
+what kind of complexity that would add, check out the "Goal #2" header above.
+
+Spawn a set of goroutines and waiting for them to finish:
+
+
+
+
stdlib
+
conc
+
+
+
+
+```go
+func main() {
+ var wg sync.WaitGroup
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ // crashes on panic!
+ doSomething()
+ }()
+ }
+ wg.Wait()
+}
+```
+
+
+
+```go
+func main() {
+ var wg conc.WaitGroup
+ for i := 0; i < 10; i++ {
+ wg.Go(doSomething)
+ }
+ wg.Wait()
+}
+```
+
+
+
+
+Process each element of a stream in a static pool of goroutines:
+
+
+
+
stdlib
+
conc
+
+
+
+
+```go
+func process(stream chan int) {
+ var wg sync.WaitGroup
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for elem := range stream {
+ handle(elem)
+ }
+ }()
+ }
+ wg.Wait()
+}
+```
+
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && (cmp(data[i], data[a]) < 0) {
+ i++
+ }
+ for i <= j && !(cmp(data[j], data[a]) < 0) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && (cmp(data[i], data[a]) < 0) {
+ i++
+ }
+ for i <= j && !(cmp(data[j], data[a]) < 0) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumes that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !(cmp(data[a], data[i]) < 0) {
+ i++
+ }
+ for i <= j && (cmp(data[a], data[j]) < 0) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !(cmp(data[i], data[i-1]) < 0) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !(cmp(data[j], data[j-1]) < 0) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !(cmp(data[j], data[j-1]) < 0) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotCmpFunc chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentCmpFunc(data, i, &swaps, cmp)
+ j = medianAdjacentCmpFunc(data, j, &swaps, cmp)
+ k = medianAdjacentCmpFunc(data, k, &swaps, cmp)
+ }
+ // Find the median among i, j, k and stores it into j.
+ j = medianCmpFunc(data, i, j, k, &swaps, cmp)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) {
+ if cmp(data[b], data[a]) < 0 {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int {
+ a, b = order2CmpFunc(data, a, b, swaps, cmp)
+ b, c = order2CmpFunc(data, b, c, swaps, cmp)
+ a, b = order2CmpFunc(data, a, b, swaps, cmp)
+ return b
+}
+
+// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
+func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int {
+ return medianCmpFunc(data, a-1, a, a+1, swaps, cmp)
+}
+
+func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortCmpFunc(data, a, b, cmp)
+ a = b
+ b += blockSize
+ }
+ insertionSortCmpFunc(data, a, n, cmp)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeCmpFunc(data, a, a+blockSize, b, cmp)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeCmpFunc(data, a, m, n, cmp)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-m. Wolog M < N.
+// The recursion depth is bound by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if cmp(data[h], data[a]) < 0 {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !(cmp(data[m], data[h]) < 0) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !(cmp(data[p-c], data[c]) < 0) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateCmpFunc(data, start, m, end, cmp)
+ }
+ if a < start && start < mid {
+ symMergeCmpFunc(data, a, start, mid, cmp)
+ }
+ if mid < end && end < b {
+ symMergeCmpFunc(data, mid, end, b, cmp)
+ }
+}
+
+// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeCmpFunc(data, m-i, m, j, cmp)
+ i -= j
+ } else {
+ swapRangeCmpFunc(data, m-i, m+j-i, i, cmp)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeCmpFunc(data, m-i, m, i, cmp)
+}
diff --git a/test/tools/vendor/golang.org/x/exp/slices/zsortordered.go b/test/tools/vendor/golang.org/x/exp/slices/zsortordered.go
new file mode 100644
index 00000000000..99b47c3986a
--- /dev/null
+++ b/test/tools/vendor/golang.org/x/exp/slices/zsortordered.go
@@ -0,0 +1,481 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// insertionSortOrdered sorts data[a:b] using insertion sort.
+func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && cmpLess(data[j], data[j-1]); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownOrdered implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) {
+ child++
+ }
+ if !cmpLess(data[first+root], data[first+child]) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownOrdered(data, i, hi, first)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownOrdered(data, lo, i, first)
+ }
+}
+
+// pdqsortOrdered sorts data[a:b].
+// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortOrdered(data, a, b)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortOrdered(data, a, b)
+ return
+ }
+
+		// If the last partitioning was imbalanced, we need to break patterns.
+ if !wasBalanced {
+ breakPatternsOrdered(data, a, b)
+ limit--
+ }
+
+ pivot, hint := choosePivotOrdered(data, a, b)
+ if hint == decreasingHint {
+ reverseRangeOrdered(data, a, b)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortOrdered(data, a, b) {
+ return
+ }
+ }
+
+ // Probably the slice contains many duplicate elements, partition the slice into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !cmpLess(data[a-1], data[pivot]) {
+ mid := partitionEqualOrdered(data, a, b, pivot)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortOrdered(data, a, mid, limit)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortOrdered(data, mid+1, b, limit)
+ b = mid
+ }
+ }
+}
+
+// partitionOrdered does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
-
- > **Deprecated**: this field is not part of the image specification and is
- > always empty. It must not be used, and will be removed in API v1.48.
- type: "string"
- example: ""
- Domainname:
- description: |
- The domain name to use for the container.
-
-
-
- > **Deprecated**: this field is not part of the image specification and is
- > always empty. It must not be used, and will be removed in API v1.48.
- type: "string"
- example: ""
User:
description: "The user that commands are run as inside the container."
type: "string"
example: "web:web"
- AttachStdin:
- description: |
- Whether to attach to `stdin`.
-
-
-
- > **Deprecated**: this field is not part of the image specification and is
- > always false. It must not be used, and will be removed in API v1.48.
- type: "boolean"
- default: false
- example: false
- AttachStdout:
- description: |
- Whether to attach to `stdout`.
-
-
-
- > **Deprecated**: this field is not part of the image specification and is
- > always false. It must not be used, and will be removed in API v1.48.
- type: "boolean"
- default: false
- example: false
- AttachStderr:
- description: |
- Whether to attach to `stderr`.
-
-
-
- > **Deprecated**: this field is not part of the image specification and is
- > always false. It must not be used, and will be removed in API v1.48.
- type: "boolean"
- default: false
- example: false
ExposedPorts:
description: |
An object mapping ports to an empty object in the form:
@@ -1501,39 +1448,6 @@ definitions:
"80/tcp": {},
"443/tcp": {}
}
- Tty:
- description: |
- Attach standard streams to a TTY, including `stdin` if it is not closed.
-
-
-
- > **Deprecated**: this field is not part of the image specification and is
- > always false. It must not be used, and will be removed in API v1.48.
- type: "boolean"
- default: false
- example: false
- OpenStdin:
- description: |
- Open `stdin`
-
-
-
- > **Deprecated**: this field is not part of the image specification and is
- > always false. It must not be used, and will be removed in API v1.48.
- type: "boolean"
- default: false
- example: false
- StdinOnce:
- description: |
- Close `stdin` after one attached client disconnects.
-
-
-
- > **Deprecated**: this field is not part of the image specification and is
- > always false. It must not be used, and will be removed in API v1.48.
- type: "boolean"
- default: false
- example: false
Env:
description: |
A list of environment variables to set inside the container in the
@@ -1559,18 +1473,6 @@ definitions:
default: false
example: false
x-nullable: true
- Image:
- description: |
- The name (or reference) of the image to use when creating the container,
- or which was used when the container was created.
-
-
-
- > **Deprecated**: this field is not part of the image specification and is
- > always empty. It must not be used, and will be removed in API v1.48.
- type: "string"
- default: ""
- example: ""
Volumes:
description: |
An object mapping mount point paths inside the container to empty
@@ -1599,30 +1501,6 @@ definitions:
items:
type: "string"
example: []
- NetworkDisabled:
- description: |
- Disable networking for the container.
-
-
-
- > **Deprecated**: this field is not part of the image specification and is
- > always omitted. It must not be used, and will be removed in API v1.48.
- type: "boolean"
- default: false
- example: false
- x-nullable: true
- MacAddress:
- description: |
- MAC address of the container.
-
-
-
- > **Deprecated**: this field is not part of the image specification and is
- > always omitted. It must not be used, and will be removed in API v1.48.
- type: "string"
- default: ""
- example: ""
- x-nullable: true
OnBuild:
description: |
`ONBUILD` metadata that were defined in the image's `Dockerfile`.
@@ -1645,17 +1523,6 @@ definitions:
type: "string"
example: "SIGTERM"
x-nullable: true
- StopTimeout:
- description: |
- Timeout to stop a container in seconds.
-
-
-
- > **Deprecated**: this field is not part of the image specification and is
- > always omitted. It must not be used, and will be removed in API v1.48.
- type: "integer"
- default: 10
- x-nullable: true
Shell:
description: |
Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell.
@@ -1666,19 +1533,11 @@ definitions:
example: ["/bin/sh", "-c"]
# FIXME(thaJeztah): temporarily using a full example to remove some "omitempty" fields. Remove once the fields are removed.
example:
- "Hostname": ""
- "Domainname": ""
"User": "web:web"
- "AttachStdin": false
- "AttachStdout": false
- "AttachStderr": false
"ExposedPorts": {
"80/tcp": {},
"443/tcp": {}
}
- "Tty": false
- "OpenStdin": false
- "StdinOnce": false
"Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"]
"Cmd": ["/bin/sh"]
"Healthcheck": {
@@ -1690,7 +1549,6 @@ definitions:
"StartInterval": 0
}
"ArgsEscaped": true
- "Image": ""
"Volumes": {
"/app/data": {},
"/app/config": {}
@@ -2956,6 +2814,23 @@ definitions:
progressDetail:
$ref: "#/definitions/ProgressDetail"
+ DeviceInfo:
+ type: "object"
+ description: |
+ DeviceInfo represents a device that can be used by a container.
+ properties:
+ Source:
+ type: "string"
+ example: "cdi"
+ description: |
+ The origin device driver.
+ ID:
+ type: "string"
+ example: "vendor.com/gpu=0"
+ description: |
+ The unique identifier for the device within its source driver.
+ For CDI devices, this would be an FQDN like "vendor.com/gpu=0".
+
ErrorDetail:
type: "object"
properties:
@@ -6858,6 +6733,15 @@ definitions:
example: "24"
FirewallBackend:
$ref: "#/definitions/FirewallInfo"
+ DiscoveredDevices:
+ description: |
+ List of devices discovered by device drivers.
+
+ Each device includes information about its source driver, kind, name,
+ and additional driver-specific attributes.
+ type: "array"
+ items:
+ $ref: "#/definitions/DeviceInfo"
Warnings:
description: |
List of warnings / informational messages about missing features, or
@@ -9934,6 +9818,18 @@ paths:
description: "Do not delete untagged parent images"
type: "boolean"
default: false
+ - name: "platforms"
+ in: "query"
+ description: |
+ Select platform-specific content to delete.
+ Multiple values are accepted.
+        Each platform is an OCI platform encoded as a JSON string.
+ type: "array"
+ items:
+ # This should be OCIPlatform
+ # but $ref is not supported for array in query in Swagger 2.0
+ # $ref: "#/definitions/OCIPlatform"
+ type: "string"
tags: ["Image"]
/images/search:
get:
diff --git a/vendor/github.com/docker/docker/api/types/backend/backend.go b/vendor/github.com/docker/docker/api/types/backend/backend.go
index 4982bce3668..9682ff69415 100644
--- a/vendor/github.com/docker/docker/api/types/backend/backend.go
+++ b/vendor/github.com/docker/docker/api/types/backend/backend.go
@@ -160,7 +160,7 @@ type ImageInspectOpts struct {
type CommitConfig struct {
Author string
Comment string
- Config *container.Config
+ Config *container.Config // TODO(thaJeztah); change this to [dockerspec.DockerOCIImageConfig]
ContainerConfig *container.Config
ContainerID string
ContainerMountLabel string
diff --git a/vendor/github.com/docker/docker/api/types/backend/build.go b/vendor/github.com/docker/docker/api/types/backend/build.go
index 91715d0b91b..41d7d64165d 100644
--- a/vendor/github.com/docker/docker/api/types/backend/build.go
+++ b/vendor/github.com/docker/docker/api/types/backend/build.go
@@ -3,7 +3,7 @@ package backend // import "github.com/docker/docker/api/types/backend"
import (
"io"
- "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/build"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/pkg/streamformatter"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
@@ -34,7 +34,7 @@ type ProgressWriter struct {
type BuildConfig struct {
Source io.ReadCloser
ProgressWriter ProgressWriter
- Options *types.ImageBuildOptions
+ Options *build.ImageBuildOptions
}
// GetImageAndLayerOptions are the options supported by GetImageAndReleasableLayer
diff --git a/vendor/github.com/docker/docker/api/types/build/build.go b/vendor/github.com/docker/docker/api/types/build/build.go
new file mode 100644
index 00000000000..c43a0e21ea7
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/build/build.go
@@ -0,0 +1,91 @@
+package build
+
+import (
+ "io"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/registry"
+)
+
+// BuilderVersion sets the version of underlying builder to use
+type BuilderVersion string
+
+const (
+ // BuilderV1 is the first generation builder in docker daemon
+ BuilderV1 BuilderVersion = "1"
+ // BuilderBuildKit is builder based on moby/buildkit project
+ BuilderBuildKit BuilderVersion = "2"
+)
+
+// Result contains the image id of a successful build.
+type Result struct {
+ ID string
+}
+
+// ImageBuildOptions holds the information
+// necessary to build images.
+type ImageBuildOptions struct {
+ Tags []string
+ SuppressOutput bool
+ RemoteContext string
+ NoCache bool
+ Remove bool
+ ForceRemove bool
+ PullParent bool
+ Isolation container.Isolation
+ CPUSetCPUs string
+ CPUSetMems string
+ CPUShares int64
+ CPUQuota int64
+ CPUPeriod int64
+ Memory int64
+ MemorySwap int64
+ CgroupParent string
+ NetworkMode string
+ ShmSize int64
+ Dockerfile string
+ Ulimits []*container.Ulimit
+ // BuildArgs needs to be a *string instead of just a string so that
+ // we can tell the difference between "" (empty string) and no value
+ // at all (nil). See the parsing of buildArgs in
+ // api/server/router/build/build_routes.go for even more info.
+ BuildArgs map[string]*string
+ AuthConfigs map[string]registry.AuthConfig
+ Context io.Reader
+ Labels map[string]string
+ // squash the resulting image's layers to the parent
+ // preserves the original image and creates a new one from the parent with all
+ // the changes applied to a single layer
+ Squash bool
+ // CacheFrom specifies images that are used for matching cache. Images
+ // specified here do not need to have a valid parent chain to match cache.
+ CacheFrom []string
+ SecurityOpt []string
+ ExtraHosts []string // List of extra hosts
+ Target string
+ SessionID string
+ Platform string
+ // Version specifies the version of the underlying builder to use
+ Version BuilderVersion
+ // BuildID is an optional identifier that can be passed together with the
+ // build request. The same identifier can be used to gracefully cancel the
+ // build with the cancel request.
+ BuildID string
+ // Outputs defines configurations for exporting build results. Only supported
+ // in BuildKit mode
+ Outputs []ImageBuildOutput
+}
+
+// ImageBuildOutput defines configuration for exporting a build result
+type ImageBuildOutput struct {
+ Type string
+ Attrs map[string]string
+}
+
+// ImageBuildResponse holds information
+// returned by a server after building
+// an image.
+type ImageBuildResponse struct {
+ Body io.ReadCloser
+ OSType string
+}
diff --git a/vendor/github.com/docker/docker/api/types/build/cache.go b/vendor/github.com/docker/docker/api/types/build/cache.go
new file mode 100644
index 00000000000..42c84045736
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/build/cache.go
@@ -0,0 +1,52 @@
+package build
+
+import (
+ "time"
+
+ "github.com/docker/docker/api/types/filters"
+)
+
+// CacheRecord contains information about a build cache record.
+type CacheRecord struct {
+ // ID is the unique ID of the build cache record.
+ ID string
+ // Parent is the ID of the parent build cache record.
+ //
+ // Deprecated: deprecated in API v1.42 and up, as it was deprecated in BuildKit; use Parents instead.
+ Parent string `json:"Parent,omitempty"`
+ // Parents is the list of parent build cache record IDs.
+ Parents []string `json:" Parents,omitempty"`
+ // Type is the cache record type.
+ Type string
+ // Description is a description of the build-step that produced the build cache.
+ Description string
+ // InUse indicates if the build cache is in use.
+ InUse bool
+ // Shared indicates if the build cache is shared.
+ Shared bool
+ // Size is the amount of disk space used by the build cache (in bytes).
+ Size int64
+ // CreatedAt is the date and time at which the build cache was created.
+ CreatedAt time.Time
+ // LastUsedAt is the date and time at which the build cache was last used.
+ LastUsedAt *time.Time
+ UsageCount int
+}
+
+// CachePruneOptions hold parameters to prune the build cache.
+type CachePruneOptions struct {
+ All bool
+ ReservedSpace int64
+ MaxUsedSpace int64
+ MinFreeSpace int64
+ Filters filters.Args
+
+ KeepStorage int64 // Deprecated: deprecated in API 1.48.
+}
+
+// CachePruneReport contains the response for Engine API:
+// POST "/build/prune"
+type CachePruneReport struct {
+ CachesDeleted []string
+ SpaceReclaimed uint64
+}
diff --git a/vendor/github.com/docker/docker/api/types/build/disk_usage.go b/vendor/github.com/docker/docker/api/types/build/disk_usage.go
new file mode 100644
index 00000000000..e969b6d615f
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/build/disk_usage.go
@@ -0,0 +1,8 @@
+package build
+
+// CacheDiskUsage contains disk usage for the build cache.
+type CacheDiskUsage struct {
+ TotalSize int64
+ Reclaimable int64
+ Items []*CacheRecord
+}
diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go
index dce8260f328..a3704edd348 100644
--- a/vendor/github.com/docker/docker/api/types/client.go
+++ b/vendor/github.com/docker/docker/api/types/client.go
@@ -3,12 +3,7 @@ package types // import "github.com/docker/docker/api/types"
import (
"bufio"
"context"
- "io"
"net"
-
- "github.com/docker/docker/api/types/container"
- "github.com/docker/docker/api/types/filters"
- "github.com/docker/docker/api/types/registry"
)
// NewHijackedResponse initializes a [HijackedResponse] type.
@@ -51,165 +46,6 @@ func (h *HijackedResponse) CloseWrite() error {
return nil
}
-// ImageBuildOptions holds the information
-// necessary to build images.
-type ImageBuildOptions struct {
- Tags []string
- SuppressOutput bool
- RemoteContext string
- NoCache bool
- Remove bool
- ForceRemove bool
- PullParent bool
- Isolation container.Isolation
- CPUSetCPUs string
- CPUSetMems string
- CPUShares int64
- CPUQuota int64
- CPUPeriod int64
- Memory int64
- MemorySwap int64
- CgroupParent string
- NetworkMode string
- ShmSize int64
- Dockerfile string
- Ulimits []*container.Ulimit
- // BuildArgs needs to be a *string instead of just a string so that
- // we can tell the difference between "" (empty string) and no value
- // at all (nil). See the parsing of buildArgs in
- // api/server/router/build/build_routes.go for even more info.
- BuildArgs map[string]*string
- AuthConfigs map[string]registry.AuthConfig
- Context io.Reader
- Labels map[string]string
- // squash the resulting image's layers to the parent
- // preserves the original image and creates a new one from the parent with all
- // the changes applied to a single layer
- Squash bool
- // CacheFrom specifies images that are used for matching cache. Images
- // specified here do not need to have a valid parent chain to match cache.
- CacheFrom []string
- SecurityOpt []string
- ExtraHosts []string // List of extra hosts
- Target string
- SessionID string
- Platform string
- // Version specifies the version of the underlying builder to use
- Version BuilderVersion
- // BuildID is an optional identifier that can be passed together with the
- // build request. The same identifier can be used to gracefully cancel the
- // build with the cancel request.
- BuildID string
- // Outputs defines configurations for exporting build results. Only supported
- // in BuildKit mode
- Outputs []ImageBuildOutput
-}
-
-// ImageBuildOutput defines configuration for exporting a build result
-type ImageBuildOutput struct {
- Type string
- Attrs map[string]string
-}
-
-// BuilderVersion sets the version of underlying builder to use
-type BuilderVersion string
-
-const (
- // BuilderV1 is the first generation builder in docker daemon
- BuilderV1 BuilderVersion = "1"
- // BuilderBuildKit is builder based on moby/buildkit project
- BuilderBuildKit BuilderVersion = "2"
-)
-
-// ImageBuildResponse holds information
-// returned by a server after building
-// an image.
-type ImageBuildResponse struct {
- Body io.ReadCloser
- OSType string
-}
-
-// NodeListOptions holds parameters to list nodes with.
-type NodeListOptions struct {
- Filters filters.Args
-}
-
-// NodeRemoveOptions holds parameters to remove nodes with.
-type NodeRemoveOptions struct {
- Force bool
-}
-
-// ServiceCreateOptions contains the options to use when creating a service.
-type ServiceCreateOptions struct {
- // EncodedRegistryAuth is the encoded registry authorization credentials to
- // use when updating the service.
- //
- // This field follows the format of the X-Registry-Auth header.
- EncodedRegistryAuth string
-
- // QueryRegistry indicates whether the service update requires
- // contacting a registry. A registry may be contacted to retrieve
- // the image digest and manifest, which in turn can be used to update
- // platform or other information about the service.
- QueryRegistry bool
-}
-
-// Values for RegistryAuthFrom in ServiceUpdateOptions
-const (
- RegistryAuthFromSpec = "spec"
- RegistryAuthFromPreviousSpec = "previous-spec"
-)
-
-// ServiceUpdateOptions contains the options to be used for updating services.
-type ServiceUpdateOptions struct {
- // EncodedRegistryAuth is the encoded registry authorization credentials to
- // use when updating the service.
- //
- // This field follows the format of the X-Registry-Auth header.
- EncodedRegistryAuth string
-
- // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate
- // into this field. While it does open API users up to racy writes, most
- // users may not need that level of consistency in practice.
-
- // RegistryAuthFrom specifies where to find the registry authorization
- // credentials if they are not given in EncodedRegistryAuth. Valid
- // values are "spec" and "previous-spec".
- RegistryAuthFrom string
-
- // Rollback indicates whether a server-side rollback should be
- // performed. When this is set, the provided spec will be ignored.
- // The valid values are "previous" and "none". An empty value is the
- // same as "none".
- Rollback string
-
- // QueryRegistry indicates whether the service update requires
- // contacting a registry. A registry may be contacted to retrieve
- // the image digest and manifest, which in turn can be used to update
- // platform or other information about the service.
- QueryRegistry bool
-}
-
-// ServiceListOptions holds parameters to list services with.
-type ServiceListOptions struct {
- Filters filters.Args
-
- // Status indicates whether the server should include the service task
- // count of running and desired tasks.
- Status bool
-}
-
-// ServiceInspectOptions holds parameters related to the "service inspect"
-// operation.
-type ServiceInspectOptions struct {
- InsertDefaults bool
-}
-
-// TaskListOptions holds parameters to list tasks with.
-type TaskListOptions struct {
- Filters filters.Args
-}
-
// PluginRemoveOptions holds parameters to remove plugins.
type PluginRemoveOptions struct {
Force bool
@@ -243,13 +79,6 @@ type PluginInstallOptions struct {
Args []string
}
-// SwarmUnlockKeyResponse contains the response for Engine API:
-// GET /swarm/unlockkey
-type SwarmUnlockKeyResponse struct {
- // UnlockKey is the unlock key in ASCII-armored format.
- UnlockKey string
-}
-
// PluginCreateOptions hold all options to plugin create.
type PluginCreateOptions struct {
RepoName string
diff --git a/vendor/github.com/docker/docker/api/types/container/container.go b/vendor/github.com/docker/docker/api/types/container/container.go
index 65fabbf425c..a191ca8bdb7 100644
--- a/vendor/github.com/docker/docker/api/types/container/container.go
+++ b/vendor/github.com/docker/docker/api/types/container/container.go
@@ -104,7 +104,7 @@ type MountPoint struct {
// State stores container's running state
// it's part of ContainerJSONBase and returned by "inspect" command
type State struct {
- Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead"
+ Status ContainerState // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead"
Running bool
Paused bool
Restarting bool
@@ -132,7 +132,7 @@ type Summary struct {
SizeRw int64 `json:",omitempty"`
SizeRootFs int64 `json:",omitempty"`
Labels map[string]string
- State string
+ State ContainerState
Status string
HostConfig struct {
NetworkMode string `json:",omitempty"`
diff --git a/vendor/github.com/docker/docker/api/types/container/disk_usage.go b/vendor/github.com/docker/docker/api/types/container/disk_usage.go
new file mode 100644
index 00000000000..05b6cbe9c70
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/disk_usage.go
@@ -0,0 +1,8 @@
+package container
+
+// DiskUsage contains disk usage for containers.
+type DiskUsage struct {
+ TotalSize int64
+ Reclaimable int64
+ Items []*Summary
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/health.go b/vendor/github.com/docker/docker/api/types/container/health.go
index 93663746f61..96e91cc8d8a 100644
--- a/vendor/github.com/docker/docker/api/types/container/health.go
+++ b/vendor/github.com/docker/docker/api/types/container/health.go
@@ -1,18 +1,27 @@
package container
-import "time"
+import (
+ "fmt"
+ "strings"
+ "time"
+)
+
+// HealthStatus is a string representation of the container's health.
+//
+// It currently is an alias for string, but may become a distinct type in future.
+type HealthStatus = string
// Health states
const (
- NoHealthcheck = "none" // Indicates there is no healthcheck
- Starting = "starting" // Starting indicates that the container is not yet ready
- Healthy = "healthy" // Healthy indicates that the container is running correctly
- Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem
+ NoHealthcheck HealthStatus = "none" // Indicates there is no healthcheck
+ Starting HealthStatus = "starting" // Starting indicates that the container is not yet ready
+ Healthy HealthStatus = "healthy" // Healthy indicates that the container is running correctly
+ Unhealthy HealthStatus = "unhealthy" // Unhealthy indicates that the container has a problem
)
// Health stores information about the container's healthcheck results
type Health struct {
- Status string // Status is one of [Starting], [Healthy] or [Unhealthy].
+ Status HealthStatus // Status is one of [Starting], [Healthy] or [Unhealthy].
FailingStreak int // FailingStreak is the number of consecutive failures
Log []*HealthcheckResult // Log contains the last few results (oldest first)
}
@@ -24,3 +33,18 @@ type HealthcheckResult struct {
ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe
Output string // Output from last check
}
+
+var validHealths = []string{
+ NoHealthcheck, Starting, Healthy, Unhealthy,
+}
+
+// ValidateHealthStatus checks if the provided string is a valid
+// container [HealthStatus].
+func ValidateHealthStatus(s HealthStatus) error {
+ switch s {
+ case NoHealthcheck, Starting, Healthy, Unhealthy:
+ return nil
+ default:
+ return errInvalidParameter{error: fmt.Errorf("invalid value for health (%s): must be one of %s", s, strings.Join(validHealths, ", "))}
+ }
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig.go b/vendor/github.com/docker/docker/api/types/container/hostconfig.go
index 83198305e7a..87ca82683f5 100644
--- a/vendor/github.com/docker/docker/api/types/container/hostconfig.go
+++ b/vendor/github.com/docker/docker/api/types/container/hostconfig.go
@@ -145,7 +145,7 @@ func (n NetworkMode) IsDefault() bool {
// IsPrivate indicates whether container uses its private network stack.
func (n NetworkMode) IsPrivate() bool {
- return !(n.IsHost() || n.IsContainer())
+ return !n.IsHost() && !n.IsContainer()
}
// IsContainer indicates whether container uses a container network stack.
@@ -230,7 +230,7 @@ type PidMode string
// IsPrivate indicates whether the container uses its own new pid namespace.
func (n PidMode) IsPrivate() bool {
- return !(n.IsHost() || n.IsContainer())
+ return !n.IsHost() && !n.IsContainer()
}
// IsHost indicates whether the container uses the host's pid namespace.
diff --git a/vendor/github.com/docker/docker/api/types/container/state.go b/vendor/github.com/docker/docker/api/types/container/state.go
new file mode 100644
index 00000000000..78d5c4fe85c
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/state.go
@@ -0,0 +1,64 @@
+package container
+
+import (
+ "fmt"
+ "strings"
+)
+
+// ContainerState is a string representation of the container's current state.
+//
+// It currently is an alias for string, but may become a distinct type in the future.
+type ContainerState = string
+
+const (
+ StateCreated ContainerState = "created" // StateCreated indicates the container is created, but not (yet) started.
+ StateRunning ContainerState = "running" // StateRunning indicates that the container is running.
+ StatePaused ContainerState = "paused" // StatePaused indicates that the container's current state is paused.
+ StateRestarting ContainerState = "restarting" // StateRestarting indicates that the container is currently restarting.
+ StateRemoving ContainerState = "removing" // StateRemoving indicates that the container is being removed.
+ StateExited ContainerState = "exited" // StateExited indicates that the container exited.
+ StateDead ContainerState = "dead" // StateDead indicates that the container failed to be deleted. Containers in this state are attempted to be cleaned up when the daemon restarts.
+)
+
+var validStates = []ContainerState{
+ StateCreated, StateRunning, StatePaused, StateRestarting, StateRemoving, StateExited, StateDead,
+}
+
+// ValidateContainerState checks if the provided string is a valid
+// container [ContainerState].
+func ValidateContainerState(s ContainerState) error {
+ switch s {
+ case StateCreated, StateRunning, StatePaused, StateRestarting, StateRemoving, StateExited, StateDead:
+ return nil
+ default:
+ return errInvalidParameter{error: fmt.Errorf("invalid value for state (%s): must be one of %s", s, strings.Join(validStates, ", "))}
+ }
+}
+
+// StateStatus is used to return container wait results.
+// Implements exec.ExitCode interface.
+// This type is needed as State includes a sync.Mutex field which makes
+// copying it unsafe.
+type StateStatus struct {
+ exitCode int
+ err error
+}
+
+// ExitCode returns current exitcode for the state.
+func (s StateStatus) ExitCode() int {
+ return s.exitCode
+}
+
+// Err returns current error for the state. Returns nil if the container had
+// exited on its own.
+func (s StateStatus) Err() error {
+ return s.err
+}
+
+// NewStateStatus returns a new StateStatus with the given exit code and error.
+func NewStateStatus(exitCode int, err error) StateStatus {
+ return StateStatus{
+ exitCode: exitCode,
+ err: err,
+ }
+}
diff --git a/vendor/github.com/docker/docker/api/types/image/disk_usage.go b/vendor/github.com/docker/docker/api/types/image/disk_usage.go
new file mode 100644
index 00000000000..b29d925cac4
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/image/disk_usage.go
@@ -0,0 +1,8 @@
+package image
+
+// DiskUsage contains disk usage for images.
+type DiskUsage struct {
+ TotalSize int64
+ Reclaimable int64
+ Items []*Summary
+}
diff --git a/vendor/github.com/docker/docker/api/types/image/image_inspect.go b/vendor/github.com/docker/docker/api/types/image/image_inspect.go
index 40d1f97a315..3bdb474287c 100644
--- a/vendor/github.com/docker/docker/api/types/image/image_inspect.go
+++ b/vendor/github.com/docker/docker/api/types/image/image_inspect.go
@@ -3,6 +3,7 @@ package image
import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/storage"
+ dockerspec "github.com/moby/docker-image-spec/specs-go/v1"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
@@ -84,7 +85,7 @@ type InspectResponse struct {
// Author is the name of the author that was specified when committing the
// image, or as specified through MAINTAINER (deprecated) in the Dockerfile.
Author string
- Config *container.Config
+ Config *dockerspec.DockerOCIImageConfig
// Architecture is the hardware CPU architecture that the image runs on.
Architecture string
diff --git a/vendor/github.com/docker/docker/api/types/image/opts.go b/vendor/github.com/docker/docker/api/types/image/opts.go
index 57800e0d47b..fd038557c00 100644
--- a/vendor/github.com/docker/docker/api/types/image/opts.go
+++ b/vendor/github.com/docker/docker/api/types/image/opts.go
@@ -83,6 +83,7 @@ type ListOptions struct {
// RemoveOptions holds parameters to remove images.
type RemoveOptions struct {
+ Platforms []ocispec.Platform
Force bool
PruneChildren bool
}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go
index f9a65187ffa..bdec82ffbd8 100644
--- a/vendor/github.com/docker/docker/api/types/swarm/config.go
+++ b/vendor/github.com/docker/docker/api/types/swarm/config.go
@@ -1,6 +1,10 @@
package swarm // import "github.com/docker/docker/api/types/swarm"
-import "os"
+import (
+ "os"
+
+ "github.com/docker/docker/api/types/filters"
+)
// Config represents a config.
type Config struct {
@@ -44,3 +48,15 @@ type ConfigReference struct {
ConfigID string
ConfigName string
}
+
+// ConfigCreateResponse contains the information returned to a client
+// on the creation of a new config.
+type ConfigCreateResponse struct {
+ // ID is the id of the created config.
+ ID string
+}
+
+// ConfigListOptions holds parameters to list configs
+type ConfigListOptions struct {
+ Filters filters.Args
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go
index bb98d5eedc6..f175b1b2c63 100644
--- a/vendor/github.com/docker/docker/api/types/swarm/node.go
+++ b/vendor/github.com/docker/docker/api/types/swarm/node.go
@@ -1,4 +1,5 @@
package swarm // import "github.com/docker/docker/api/types/swarm"
+import "github.com/docker/docker/api/types/filters"
// Node represents a node.
type Node struct {
@@ -137,3 +138,13 @@ const (
type Topology struct {
Segments map[string]string `json:",omitempty"`
}
+
+// NodeListOptions holds parameters to list nodes with.
+type NodeListOptions struct {
+ Filters filters.Args
+}
+
+// NodeRemoveOptions holds parameters to remove nodes with.
+type NodeRemoveOptions struct {
+ Force bool
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go
index aeb5bb54ad1..248cffab8e1 100644
--- a/vendor/github.com/docker/docker/api/types/swarm/secret.go
+++ b/vendor/github.com/docker/docker/api/types/swarm/secret.go
@@ -1,6 +1,10 @@
package swarm // import "github.com/docker/docker/api/types/swarm"
-import "os"
+import (
+ "os"
+
+ "github.com/docker/docker/api/types/filters"
+)
// Secret represents a secret.
type Secret struct {
@@ -48,3 +52,15 @@ type SecretReference struct {
SecretID string
SecretName string
}
+
+// SecretCreateResponse contains the information returned to a client
+// on the creation of a new secret.
+type SecretCreateResponse struct {
+ // ID is the id of the created secret.
+ ID string
+}
+
+// SecretListOptions holds parameters to list secrets
+type SecretListOptions struct {
+ Filters filters.Args
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go
index 5b6d5ec1207..1d0a9a47bc1 100644
--- a/vendor/github.com/docker/docker/api/types/swarm/service.go
+++ b/vendor/github.com/docker/docker/api/types/swarm/service.go
@@ -1,6 +1,10 @@
package swarm // import "github.com/docker/docker/api/types/swarm"
-import "time"
+import (
+ "time"
+
+ "github.com/docker/docker/api/types/filters"
+)
// Service represents a service.
type Service struct {
@@ -200,3 +204,69 @@ type JobStatus struct {
// Swarm manager.
LastExecution time.Time `json:",omitempty"`
}
+
+// ServiceCreateOptions contains the options to use when creating a service.
+type ServiceCreateOptions struct {
+ // EncodedRegistryAuth is the encoded registry authorization credentials to
+ // use when updating the service.
+ //
+ // This field follows the format of the X-Registry-Auth header.
+ EncodedRegistryAuth string
+
+ // QueryRegistry indicates whether the service update requires
+ // contacting a registry. A registry may be contacted to retrieve
+ // the image digest and manifest, which in turn can be used to update
+ // platform or other information about the service.
+ QueryRegistry bool
+}
+
+// Values for RegistryAuthFrom in ServiceUpdateOptions
+const (
+ RegistryAuthFromSpec = "spec"
+ RegistryAuthFromPreviousSpec = "previous-spec"
+)
+
+// ServiceUpdateOptions contains the options to be used for updating services.
+type ServiceUpdateOptions struct {
+ // EncodedRegistryAuth is the encoded registry authorization credentials to
+ // use when updating the service.
+ //
+ // This field follows the format of the X-Registry-Auth header.
+ EncodedRegistryAuth string
+
+ // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate
+ // into this field. While it does open API users up to racy writes, most
+ // users may not need that level of consistency in practice.
+
+ // RegistryAuthFrom specifies where to find the registry authorization
+ // credentials if they are not given in EncodedRegistryAuth. Valid
+ // values are "spec" and "previous-spec".
+ RegistryAuthFrom string
+
+ // Rollback indicates whether a server-side rollback should be
+ // performed. When this is set, the provided spec will be ignored.
+ // The valid values are "previous" and "none". An empty value is the
+ // same as "none".
+ Rollback string
+
+ // QueryRegistry indicates whether the service update requires
+ // contacting a registry. A registry may be contacted to retrieve
+ // the image digest and manifest, which in turn can be used to update
+ // platform or other information about the service.
+ QueryRegistry bool
+}
+
+// ServiceListOptions holds parameters to list services with.
+type ServiceListOptions struct {
+ Filters filters.Args
+
+ // Status indicates whether the server should include the service task
+ // count of running and desired tasks.
+ Status bool
+}
+
+// ServiceInspectOptions holds parameters related to the "service inspect"
+// operation.
+type ServiceInspectOptions struct {
+ InsertDefaults bool
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go
index 1b4be6fffba..2711c9eac5c 100644
--- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go
+++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go
@@ -235,3 +235,10 @@ type UpdateFlags struct {
RotateManagerToken bool
RotateManagerUnlockKey bool
}
+
+// UnlockKeyResponse contains the response for Engine API:
+// GET /swarm/unlockkey
+type UnlockKeyResponse struct {
+ // UnlockKey is the unlock key in ASCII-armored format.
+ UnlockKey string
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go
index ad3eeca0b7f..722d1ceb62e 100644
--- a/vendor/github.com/docker/docker/api/types/swarm/task.go
+++ b/vendor/github.com/docker/docker/api/types/swarm/task.go
@@ -3,6 +3,7 @@ package swarm // import "github.com/docker/docker/api/types/swarm"
import (
"time"
+ "github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm/runtime"
)
@@ -223,3 +224,8 @@ type VolumeAttachment struct {
// in the ContainerSpec, that this volume fulfills.
Target string `json:",omitempty"`
}
+
+// TaskListOptions holds parameters to list tasks with.
+type TaskListOptions struct {
+ Filters filters.Args
+}
diff --git a/vendor/github.com/docker/docker/api/types/system/disk_usage.go b/vendor/github.com/docker/docker/api/types/system/disk_usage.go
new file mode 100644
index 00000000000..99078cf196d
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/system/disk_usage.go
@@ -0,0 +1,17 @@
+package system
+
+import (
+ "github.com/docker/docker/api/types/build"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/api/types/volume"
+)
+
+// DiskUsage contains response of Engine API for API 1.49 and greater:
+// GET "/system/df"
+type DiskUsage struct {
+ Images *image.DiskUsage
+ Containers *container.DiskUsage
+ Volumes *volume.DiskUsage
+ BuildCache *build.CacheDiskUsage
+}
diff --git a/vendor/github.com/docker/docker/api/types/system/info.go b/vendor/github.com/docker/docker/api/types/system/info.go
index 27173d4630a..047639ed91e 100644
--- a/vendor/github.com/docker/docker/api/types/system/info.go
+++ b/vendor/github.com/docker/docker/api/types/system/info.go
@@ -29,8 +29,6 @@ type Info struct {
CPUSet bool
PidsLimit bool
IPv4Forwarding bool
- BridgeNfIptables bool `json:"BridgeNfIptables"` // Deprecated: netfilter module is now loaded on-demand and no longer during daemon startup, making this field obsolete. This field is always false and will be removed in the next release.
- BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` // Deprecated: netfilter module is now loaded on-demand and no longer during daemon startup, making this field obsolete. This field is always false and will be removed in the next release.
Debug bool
NFd int
OomKillDisable bool
@@ -75,6 +73,7 @@ type Info struct {
DefaultAddressPools []NetworkAddressPool `json:",omitempty"`
FirewallBackend *FirewallInfo `json:"FirewallBackend,omitempty"`
CDISpecDirs []string
+ DiscoveredDevices []DeviceInfo `json:",omitempty"`
Containerd *ContainerdInfo `json:",omitempty"`
@@ -160,3 +159,12 @@ type FirewallInfo struct {
// Info is a list of label/value pairs, containing information related to the firewall.
Info [][2]string `json:"Info,omitempty"`
}
+
+// DeviceInfo represents a discoverable device from a device driver.
+type DeviceInfo struct {
+ // Source indicates the origin device driver.
+ Source string `json:"Source"`
+ // ID is the unique identifier for the device.
+ // Example: CDI FQDN like "vendor.com/gpu=0", or other driver-specific device ID
+ ID string `json:"ID"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go
index cab5c32e3ff..edd1d6ecb22 100644
--- a/vendor/github.com/docker/docker/api/types/time/timestamp.go
+++ b/vendor/github.com/docker/docker/api/types/time/timestamp.go
@@ -30,7 +30,7 @@ func GetTimestamp(value string, reference time.Time) (string, error) {
var format string
// if the string has a Z or a + or three dashes use parse otherwise use parseinlocation
- parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
+ parseInLocation := !strings.ContainsAny(value, "zZ+") && strings.Count(value, "-") != 3
if strings.Contains(value, ".") {
if parseInLocation {
@@ -105,23 +105,23 @@ func GetTimestamp(value string, reference time.Time) (string, error) {
// since := time.Unix(seconds, nanoseconds)
//
// returns seconds as defaultSeconds if value == ""
-func ParseTimestamps(value string, defaultSeconds int64) (seconds int64, nanoseconds int64, err error) {
+func ParseTimestamps(value string, defaultSeconds int64) (seconds int64, nanoseconds int64, _ error) {
if value == "" {
return defaultSeconds, 0, nil
}
return parseTimestamp(value)
}
-func parseTimestamp(value string) (sec int64, nsec int64, err error) {
+func parseTimestamp(value string) (seconds int64, nanoseconds int64, _ error) {
s, n, ok := strings.Cut(value, ".")
- sec, err = strconv.ParseInt(s, 10, 64)
+ sec, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return sec, 0, err
}
if !ok {
return sec, 0, nil
}
- nsec, err = strconv.ParseInt(n, 10, 64)
+ nsec, err := strconv.ParseInt(n, 10, 64)
if err != nil {
return sec, nsec, err
}
diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go
index 82ae339c319..443cf82a937 100644
--- a/vendor/github.com/docker/docker/api/types/types.go
+++ b/vendor/github.com/docker/docker/api/types/types.go
@@ -1,10 +1,8 @@
package types // import "github.com/docker/docker/api/types"
import (
- "time"
-
+ "github.com/docker/docker/api/types/build"
"github.com/docker/docker/api/types/container"
- "github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/volume"
@@ -24,7 +22,7 @@ type Ping struct {
APIVersion string
OSType string
Experimental bool
- BuilderVersion BuilderVersion
+ BuilderVersion build.BuilderVersion
// SwarmStatus provides information about the current swarm status of the
// engine, obtained from the "Swarm" header in the API response.
@@ -91,41 +89,10 @@ type DiskUsage struct {
Images []*image.Summary
Containers []*container.Summary
Volumes []*volume.Volume
- BuildCache []*BuildCache
+ BuildCache []*build.CacheRecord
BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40.
}
-// BuildCachePruneReport contains the response for Engine API:
-// POST "/build/prune"
-type BuildCachePruneReport struct {
- CachesDeleted []string
- SpaceReclaimed uint64
-}
-
-// SecretCreateResponse contains the information returned to a client
-// on the creation of a new secret.
-type SecretCreateResponse struct {
- // ID is the id of the created secret.
- ID string
-}
-
-// SecretListOptions holds parameters to list secrets
-type SecretListOptions struct {
- Filters filters.Args
-}
-
-// ConfigCreateResponse contains the information returned to a client
-// on the creation of a new config.
-type ConfigCreateResponse struct {
- // ID is the id of the created config.
- ID string
-}
-
-// ConfigListOptions holds parameters to list configs
-type ConfigListOptions struct {
- Filters filters.Args
-}
-
// PushResult contains the tag, manifest digest, and manifest size from the
// push. It's used to signal this information to the trust code in the client
// so it can sign the manifest if necessary.
@@ -134,46 +101,3 @@ type PushResult struct {
Digest string
Size int
}
-
-// BuildResult contains the image id of a successful build
-type BuildResult struct {
- ID string
-}
-
-// BuildCache contains information about a build cache record.
-type BuildCache struct {
- // ID is the unique ID of the build cache record.
- ID string
- // Parent is the ID of the parent build cache record.
- //
- // Deprecated: deprecated in API v1.42 and up, as it was deprecated in BuildKit; use Parents instead.
- Parent string `json:"Parent,omitempty"`
- // Parents is the list of parent build cache record IDs.
- Parents []string `json:"Parents,omitempty"`
- // Type is the cache record type.
- Type string
- // Description is a description of the build-step that produced the build cache.
- Description string
- // InUse indicates if the build cache is in use.
- InUse bool
- // Shared indicates if the build cache is shared.
- Shared bool
- // Size is the amount of disk space used by the build cache (in bytes).
- Size int64
- // CreatedAt is the date and time at which the build cache was created.
- CreatedAt time.Time
- // LastUsedAt is the date and time at which the build cache was last used.
- LastUsedAt *time.Time
- UsageCount int
-}
-
-// BuildCachePruneOptions hold parameters to prune the build cache
-type BuildCachePruneOptions struct {
- All bool
- ReservedSpace int64
- MaxUsedSpace int64
- MinFreeSpace int64
- Filters filters.Args
-
- KeepStorage int64 // Deprecated: deprecated in API 1.48.
-}
diff --git a/vendor/github.com/docker/docker/api/types/types_deprecated.go b/vendor/github.com/docker/docker/api/types/types_deprecated.go
index 93e4336adcb..8456a45607e 100644
--- a/vendor/github.com/docker/docker/api/types/types_deprecated.go
+++ b/vendor/github.com/docker/docker/api/types/types_deprecated.go
@@ -3,10 +3,12 @@ package types
import (
"context"
+ "github.com/docker/docker/api/types/build"
"github.com/docker/docker/api/types/common"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/storage"
+ "github.com/docker/docker/api/types/swarm"
)
// IDResponse Response to an API call that returns just an Id.
@@ -113,3 +115,127 @@ type ImageInspect = image.InspectResponse
//
// Deprecated: moved to [github.com/docker/docker/api/types/registry.RequestAuthConfig].
type RequestPrivilegeFunc func(context.Context) (string, error)
+
+// SecretCreateResponse contains the information returned to a client
+// on the creation of a new secret.
+//
+// Deprecated: use [swarm.SecretCreateResponse].
+type SecretCreateResponse = swarm.SecretCreateResponse
+
+// SecretListOptions holds parameters to list secrets
+//
+// Deprecated: use [swarm.SecretListOptions].
+type SecretListOptions = swarm.SecretListOptions
+
+// ConfigCreateResponse contains the information returned to a client
+// on the creation of a new config.
+//
+// Deprecated: use [swarm.ConfigCreateResponse].
+type ConfigCreateResponse = swarm.ConfigCreateResponse
+
+// ConfigListOptions holds parameters to list configs
+//
+// Deprecated: use [swarm.ConfigListOptions].
+type ConfigListOptions = swarm.ConfigListOptions
+
+// NodeListOptions holds parameters to list nodes with.
+//
+// Deprecated: use [swarm.NodeListOptions].
+type NodeListOptions = swarm.NodeListOptions
+
+// NodeRemoveOptions holds parameters to remove nodes with.
+//
+// Deprecated: use [swarm.NodeRemoveOptions].
+type NodeRemoveOptions = swarm.NodeRemoveOptions
+
+// TaskListOptions holds parameters to list tasks with.
+//
+// Deprecated: use [swarm.TaskListOptions].
+type TaskListOptions = swarm.TaskListOptions
+
+// ServiceCreateOptions contains the options to use when creating a service.
+//
+// Deprecated: use [swarm.ServiceCreateOptions].
+type ServiceCreateOptions = swarm.ServiceCreateOptions
+
+// ServiceUpdateOptions contains the options to be used for updating services.
+//
+// Deprecated: use [swarm.ServiceUpdateOptions].
+type ServiceUpdateOptions = swarm.ServiceUpdateOptions
+
+const (
+ RegistryAuthFromSpec = swarm.RegistryAuthFromSpec // Deprecated: use [swarm.RegistryAuthFromSpec].
+ RegistryAuthFromPreviousSpec = swarm.RegistryAuthFromPreviousSpec // Deprecated: use [swarm.RegistryAuthFromPreviousSpec].
+)
+
+// ServiceListOptions holds parameters to list services with.
+//
+// Deprecated: use [swarm.ServiceListOptions].
+type ServiceListOptions = swarm.ServiceListOptions
+
+// ServiceInspectOptions holds parameters related to the "service inspect"
+// operation.
+//
+// Deprecated: use [swarm.ServiceInspectOptions].
+type ServiceInspectOptions = swarm.ServiceInspectOptions
+
+// SwarmUnlockKeyResponse contains the response for Engine API:
+// GET /swarm/unlockkey
+//
+// Deprecated: use [swarm.UnlockKeyResponse].
+type SwarmUnlockKeyResponse = swarm.UnlockKeyResponse
+
+// BuildCache contains information about a build cache record.
+//
+// Deprecated: deprecated in API 1.49. Use [build.CacheRecord] instead.
+type BuildCache = build.CacheRecord
+
+// BuildCachePruneOptions hold parameters to prune the build cache
+//
+// Deprecated: use [build.CachePruneOptions].
+type BuildCachePruneOptions = build.CachePruneOptions
+
+// BuildCachePruneReport contains the response for Engine API:
+// POST "/build/prune"
+//
+// Deprecated: use [build.CachePruneReport].
+type BuildCachePruneReport = build.CachePruneReport
+
+// BuildResult contains the image id of a successful build.
+//
+// Deprecated: use [build.Result].
+type BuildResult = build.Result
+
+// ImageBuildOptions holds the information
+// necessary to build images.
+//
+// Deprecated: use [build.ImageBuildOptions].
+type ImageBuildOptions = build.ImageBuildOptions
+
+// ImageBuildOutput defines configuration for exporting a build result
+//
+// Deprecated: use [build.ImageBuildOutput].
+type ImageBuildOutput = build.ImageBuildOutput
+
+// ImageBuildResponse holds information
+// returned by a server after building
+// an image.
+//
+// Deprecated: use [build.ImageBuildResponse].
+type ImageBuildResponse = build.ImageBuildResponse
+
+// BuilderVersion sets the version of underlying builder to use
+//
+// Deprecated: use [build.BuilderVersion].
+type BuilderVersion = build.BuilderVersion
+
+const (
+ // BuilderV1 is the first generation builder in docker daemon
+ //
+ // Deprecated: use [build.BuilderV1].
+ BuilderV1 = build.BuilderV1
+ // BuilderBuildKit is builder based on moby/buildkit project
+ //
+ // Deprecated: use [build.BuilderBuildKit].
+ BuilderBuildKit = build.BuilderBuildKit
+)
diff --git a/vendor/github.com/docker/docker/api/types/volume/disk_usage.go b/vendor/github.com/docker/docker/api/types/volume/disk_usage.go
new file mode 100644
index 00000000000..3d716c6e00d
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/volume/disk_usage.go
@@ -0,0 +1,8 @@
+package volume
+
+// DiskUsage contains disk usage for volumes.
+type DiskUsage struct {
+ TotalSize int64
+ Reclaimable int64
+ Items []*Volume
+}
diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go
index 92b47d18384..db8fad55d3e 100644
--- a/vendor/github.com/docker/docker/client/build_prune.go
+++ b/vendor/github.com/docker/docker/client/build_prune.go
@@ -6,13 +6,13 @@ import (
"net/url"
"strconv"
- "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/build"
"github.com/docker/docker/api/types/filters"
"github.com/pkg/errors"
)
// BuildCachePrune requests the daemon to delete unused cache data
-func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) {
+func (cli *Client) BuildCachePrune(ctx context.Context, opts build.CachePruneOptions) (*build.CachePruneReport, error) {
if err := cli.NewVersionError(ctx, "1.31", "build prune"); err != nil {
return nil, err
}
@@ -47,7 +47,7 @@ func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePru
return nil, err
}
- report := types.BuildCachePruneReport{}
+ report := build.CachePruneReport{}
if err := json.NewDecoder(resp.Body).Decode(&report); err != nil {
return nil, errors.Wrap(err, "error retrieving disk usage")
}
diff --git a/vendor/github.com/docker/docker/client/client_interfaces.go b/vendor/github.com/docker/docker/client/client_interfaces.go
index f70d8ffa01a..fe4b1af9b03 100644
--- a/vendor/github.com/docker/docker/client/client_interfaces.go
+++ b/vendor/github.com/docker/docker/client/client_interfaces.go
@@ -7,6 +7,7 @@ import (
"net/http"
"github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/build"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
@@ -109,8 +110,8 @@ type DistributionAPIClient interface {
// ImageAPIClient defines API client methods for the images
type ImageAPIClient interface {
- ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
- BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error)
+ ImageBuild(ctx context.Context, context io.Reader, options build.ImageBuildOptions) (build.ImageBuildResponse, error)
+ BuildCachePrune(ctx context.Context, opts build.CachePruneOptions) (*build.CachePruneReport, error)
BuildCancel(ctx context.Context, id string) error
ImageCreate(ctx context.Context, parentReference string, options image.CreateOptions) (io.ReadCloser, error)
ImageImport(ctx context.Context, source image.ImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error)
@@ -154,8 +155,8 @@ type NetworkAPIClient interface {
// NodeAPIClient defines API client methods for the nodes
type NodeAPIClient interface {
NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error)
- NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error)
- NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error
+ NodeList(ctx context.Context, options swarm.NodeListOptions) ([]swarm.Node, error)
+ NodeRemove(ctx context.Context, nodeID string, options swarm.NodeRemoveOptions) error
NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error
}
@@ -175,22 +176,22 @@ type PluginAPIClient interface {
// ServiceAPIClient defines API client methods for the services
type ServiceAPIClient interface {
- ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (swarm.ServiceCreateResponse, error)
- ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error)
- ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error)
+ ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options swarm.ServiceCreateOptions) (swarm.ServiceCreateResponse, error)
+ ServiceInspectWithRaw(ctx context.Context, serviceID string, options swarm.ServiceInspectOptions) (swarm.Service, []byte, error)
+ ServiceList(ctx context.Context, options swarm.ServiceListOptions) ([]swarm.Service, error)
ServiceRemove(ctx context.Context, serviceID string) error
- ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error)
+ ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options swarm.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error)
ServiceLogs(ctx context.Context, serviceID string, options container.LogsOptions) (io.ReadCloser, error)
TaskLogs(ctx context.Context, taskID string, options container.LogsOptions) (io.ReadCloser, error)
TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error)
- TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error)
+ TaskList(ctx context.Context, options swarm.TaskListOptions) ([]swarm.Task, error)
}
// SwarmAPIClient defines API client methods for the swarm
type SwarmAPIClient interface {
SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error)
SwarmJoin(ctx context.Context, req swarm.JoinRequest) error
- SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error)
+ SwarmGetUnlockKey(ctx context.Context) (swarm.UnlockKeyResponse, error)
SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error
SwarmLeave(ctx context.Context, force bool) error
SwarmInspect(ctx context.Context) (swarm.Swarm, error)
@@ -219,8 +220,8 @@ type VolumeAPIClient interface {
// SecretAPIClient defines API client methods for secrets
type SecretAPIClient interface {
- SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error)
- SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error)
+ SecretList(ctx context.Context, options swarm.SecretListOptions) ([]swarm.Secret, error)
+ SecretCreate(ctx context.Context, secret swarm.SecretSpec) (swarm.SecretCreateResponse, error)
SecretRemove(ctx context.Context, id string) error
SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error)
SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error
@@ -228,8 +229,8 @@ type SecretAPIClient interface {
// ConfigAPIClient defines API client methods for configs
type ConfigAPIClient interface {
- ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error)
- ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error)
+ ConfigList(ctx context.Context, options swarm.ConfigListOptions) ([]swarm.Config, error)
+ ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (swarm.ConfigCreateResponse, error)
ConfigRemove(ctx context.Context, id string) error
ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error)
ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error
diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go
index c7ea6d2eba4..1fbfc21f924 100644
--- a/vendor/github.com/docker/docker/client/config_create.go
+++ b/vendor/github.com/docker/docker/client/config_create.go
@@ -4,13 +4,12 @@ import (
"context"
"encoding/json"
- "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
)
// ConfigCreate creates a new config.
-func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) {
- var response types.ConfigCreateResponse
+func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (swarm.ConfigCreateResponse, error) {
+ var response swarm.ConfigCreateResponse
if err := cli.NewVersionError(ctx, "1.30", "config create"); err != nil {
return response, err
}
diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go
index 7e4a8ea567d..67779e98aa6 100644
--- a/vendor/github.com/docker/docker/client/config_list.go
+++ b/vendor/github.com/docker/docker/client/config_list.go
@@ -5,13 +5,12 @@ import (
"encoding/json"
"net/url"
- "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
)
// ConfigList returns the list of configs.
-func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) {
+func (cli *Client) ConfigList(ctx context.Context, options swarm.ConfigListOptions) ([]swarm.Config, error) {
if err := cli.NewVersionError(ctx, "1.30", "config list"); err != nil {
return nil, err
}
diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go
index 609f92ce662..7bd85931477 100644
--- a/vendor/github.com/docker/docker/client/errors.go
+++ b/vendor/github.com/docker/docker/client/errors.go
@@ -4,9 +4,11 @@ import (
"context"
"errors"
"fmt"
+ "net/http"
+ cerrdefs "github.com/containerd/errdefs"
+ "github.com/containerd/errdefs/pkg/errhttp"
"github.com/docker/docker/api/types/versions"
- "github.com/docker/docker/errdefs"
)
// errConnectionFailed implements an error returned when connection failed.
@@ -48,9 +50,11 @@ func connectionFailed(host string) error {
}
// IsErrNotFound returns true if the error is a NotFound error, which is returned
-// by the API when some object is not found. It is an alias for [errdefs.IsNotFound].
+// by the API when some object is not found. It is an alias for [cerrdefs.IsNotFound].
+//
+// Deprecated: use [cerrdefs.IsNotFound] instead.
func IsErrNotFound(err error) bool {
- return errdefs.IsNotFound(err)
+ return cerrdefs.IsNotFound(err)
}
type objectNotFoundError struct {
@@ -83,3 +87,43 @@ func (cli *Client) NewVersionError(ctx context.Context, APIrequired, feature str
}
return nil
}
+
+type httpError struct {
+ err error
+ errdef error
+}
+
+func (e *httpError) Error() string {
+ return e.err.Error()
+}
+
+func (e *httpError) Unwrap() error {
+ return e.err
+}
+
+func (e *httpError) Is(target error) bool {
+ return errors.Is(e.errdef, target)
+}
+
+// httpErrorFromStatusCode creates an errdef error, based on the provided HTTP status-code
+func httpErrorFromStatusCode(err error, statusCode int) error {
+ if err == nil {
+ return nil
+ }
+ base := errhttp.ToNative(statusCode)
+ if base != nil {
+ return &httpError{err: err, errdef: base}
+ }
+
+ switch {
+ case statusCode >= http.StatusOK && statusCode < http.StatusBadRequest:
+ // it's a client error
+ return err
+ case statusCode >= http.StatusBadRequest && statusCode < http.StatusInternalServerError:
+ return &httpError{err: err, errdef: cerrdefs.ErrInvalidArgument}
+ case statusCode >= http.StatusInternalServerError && statusCode < 600:
+ return &httpError{err: err, errdef: cerrdefs.ErrInternal}
+ default:
+ return &httpError{err: err, errdef: cerrdefs.ErrUnknown}
+ }
+}
diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go
index 6e2a40687bf..28b74a3f1a9 100644
--- a/vendor/github.com/docker/docker/client/image_build.go
+++ b/vendor/github.com/docker/docker/client/image_build.go
@@ -10,7 +10,7 @@ import (
"strconv"
"strings"
- "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/build"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
)
@@ -18,15 +18,15 @@ import (
// ImageBuild sends a request to the daemon to build images.
// The Body in the response implements an io.ReadCloser and it's up to the caller to
// close it.
-func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
+func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options build.ImageBuildOptions) (build.ImageBuildResponse, error) {
query, err := cli.imageBuildOptionsToQuery(ctx, options)
if err != nil {
- return types.ImageBuildResponse{}, err
+ return build.ImageBuildResponse{}, err
}
buf, err := json.Marshal(options.AuthConfigs)
if err != nil {
- return types.ImageBuildResponse{}, err
+ return build.ImageBuildResponse{}, err
}
headers := http.Header{}
@@ -35,16 +35,16 @@ func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, optio
resp, err := cli.postRaw(ctx, "/build", query, buildContext, headers)
if err != nil {
- return types.ImageBuildResponse{}, err
+ return build.ImageBuildResponse{}, err
}
- return types.ImageBuildResponse{
+ return build.ImageBuildResponse{
Body: resp.Body,
OSType: getDockerOS(resp.Header.Get("Server")),
}, nil
}
-func (cli *Client) imageBuildOptionsToQuery(ctx context.Context, options types.ImageBuildOptions) (url.Values, error) {
+func (cli *Client) imageBuildOptionsToQuery(ctx context.Context, options build.ImageBuildOptions) (url.Values, error) {
query := url.Values{}
if len(options.Tags) > 0 {
query["t"] = options.Tags
diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go
index f5fe85dd47b..ae8c807f77c 100644
--- a/vendor/github.com/docker/docker/client/image_pull.go
+++ b/vendor/github.com/docker/docker/client/image_pull.go
@@ -6,9 +6,9 @@ import (
"net/url"
"strings"
+ cerrdefs "github.com/containerd/errdefs"
"github.com/distribution/reference"
"github.com/docker/docker/api/types/image"
- "github.com/docker/docker/errdefs"
)
// ImagePull requests the docker host to pull an image from a remote registry.
@@ -35,7 +35,7 @@ func (cli *Client) ImagePull(ctx context.Context, refStr string, options image.P
}
resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
- if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
+ if cerrdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx)
if privilegeErr != nil {
return nil, privilegeErr
diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go
index 1a343f43f18..52103044030 100644
--- a/vendor/github.com/docker/docker/client/image_push.go
+++ b/vendor/github.com/docker/docker/client/image_push.go
@@ -9,10 +9,10 @@ import (
"net/http"
"net/url"
+ cerrdefs "github.com/containerd/errdefs"
"github.com/distribution/reference"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/registry"
- "github.com/docker/docker/errdefs"
)
// ImagePush requests the docker host to push an image to a remote registry.
@@ -52,7 +52,7 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options image.Pu
}
resp, err := cli.tryImagePush(ctx, ref.Name(), query, options.RegistryAuth)
- if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
+ if cerrdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx)
if privilegeErr != nil {
return nil, privilegeErr
diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go
index b0c87ca09c9..0d769139b8e 100644
--- a/vendor/github.com/docker/docker/client/image_remove.go
+++ b/vendor/github.com/docker/docker/client/image_remove.go
@@ -19,6 +19,14 @@ func (cli *Client) ImageRemove(ctx context.Context, imageID string, options imag
query.Set("noprune", "1")
}
+ if len(options.Platforms) > 0 {
+ p, err := encodePlatforms(options.Platforms...)
+ if err != nil {
+ return nil, err
+ }
+ query["platforms"] = p
+ }
+
resp, err := cli.delete(ctx, "/images/"+imageID, query, nil)
defer ensureReaderClosed(resp)
if err != nil {
diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go
index 0a7b5ec2265..f3aab43a802 100644
--- a/vendor/github.com/docker/docker/client/image_search.go
+++ b/vendor/github.com/docker/docker/client/image_search.go
@@ -7,9 +7,9 @@ import (
"net/url"
"strconv"
+ cerrdefs "github.com/containerd/errdefs"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/registry"
- "github.com/docker/docker/errdefs"
)
// ImageSearch makes the docker host search by a term in a remote registry.
@@ -32,7 +32,7 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options registr
resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth)
defer ensureReaderClosed(resp)
- if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
+ if cerrdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx)
if privilegeErr != nil {
return results, privilegeErr
diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go
index 2534f4aee1d..429eec24430 100644
--- a/vendor/github.com/docker/docker/client/node_list.go
+++ b/vendor/github.com/docker/docker/client/node_list.go
@@ -5,13 +5,12 @@ import (
"encoding/json"
"net/url"
- "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
)
// NodeList returns the list of nodes.
-func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) {
+func (cli *Client) NodeList(ctx context.Context, options swarm.NodeListOptions) ([]swarm.Node, error) {
query := url.Values{}
if options.Filters.Len() > 0 {
diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go
index 81f8fed6b59..07d8e653649 100644
--- a/vendor/github.com/docker/docker/client/node_remove.go
+++ b/vendor/github.com/docker/docker/client/node_remove.go
@@ -4,11 +4,11 @@ import (
"context"
"net/url"
- "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/swarm"
)
// NodeRemove removes a Node.
-func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error {
+func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options swarm.NodeRemoveOptions) error {
nodeID, err := trimID("node", nodeID)
if err != nil {
return err
diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go
index c7645e56d60..2ffa5945f45 100644
--- a/vendor/github.com/docker/docker/client/ping.go
+++ b/vendor/github.com/docker/docker/client/ping.go
@@ -7,6 +7,7 @@ import (
"strings"
"github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/build"
"github.com/docker/docker/api/types/swarm"
)
@@ -67,7 +68,7 @@ func parsePingResponse(cli *Client, resp *http.Response) (types.Ping, error) {
ping.Experimental = true
}
if bv := resp.Header.Get("Builder-Version"); bv != "" {
- ping.BuilderVersion = types.BuilderVersion(bv)
+ ping.BuilderVersion = build.BuilderVersion(bv)
}
if si := resp.Header.Get("Swarm"); si != "" {
state, role, _ := strings.Cut(si, "/")
diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go
index b04dcf9a108..8553961ba69 100644
--- a/vendor/github.com/docker/docker/client/plugin_install.go
+++ b/vendor/github.com/docker/docker/client/plugin_install.go
@@ -7,15 +7,15 @@ import (
"net/http"
"net/url"
+ cerrdefs "github.com/containerd/errdefs"
"github.com/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/registry"
- "github.com/docker/docker/errdefs"
"github.com/pkg/errors"
)
// PluginInstall installs a plugin
-func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) {
+func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (_ io.ReadCloser, retErr error) {
query := url.Values{}
if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil {
return nil, errors.Wrap(err, "invalid remote reference")
@@ -45,7 +45,7 @@ func (cli *Client) PluginInstall(ctx context.Context, name string, options types
return
}
defer func() {
- if err != nil {
+ if retErr != nil {
delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil)
ensureReaderClosed(delResp)
}
@@ -82,7 +82,7 @@ func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileg
func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) {
resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth)
- if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
+ if cerrdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
// todo: do inspect before to check existing name before checking privileges
newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx)
if privilegeErr != nil {
diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go
index 4cc64350bdb..d9074a736c7 100644
--- a/vendor/github.com/docker/docker/client/request.go
+++ b/vendor/github.com/docker/docker/client/request.go
@@ -15,7 +15,6 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/versions"
- "github.com/docker/docker/errdefs"
"github.com/pkg/errors"
)
@@ -116,10 +115,8 @@ func (cli *Client) sendRequest(ctx context.Context, method, path string, query u
resp, err := cli.doRequest(req)
switch {
- case errors.Is(err, context.Canceled):
- return nil, errdefs.Cancelled(err)
- case errors.Is(err, context.DeadlineExceeded):
- return nil, errdefs.Deadline(err)
+ case errors.Is(err, context.Canceled), errors.Is(err, context.DeadlineExceeded):
+ return nil, err
case err == nil:
return resp, cli.checkResponseErr(resp)
default:
@@ -195,11 +192,11 @@ func (cli *Client) checkResponseErr(serverResp *http.Response) (retErr error) {
if serverResp == nil {
return nil
}
- if serverResp.StatusCode >= 200 && serverResp.StatusCode < 400 {
+ if serverResp.StatusCode >= http.StatusOK && serverResp.StatusCode < http.StatusBadRequest {
return nil
}
defer func() {
- retErr = errdefs.FromStatusCode(retErr, serverResp.StatusCode)
+ retErr = httpErrorFromStatusCode(retErr, serverResp.StatusCode)
}()
var body []byte
diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go
index bbd11918777..aee051b9028 100644
--- a/vendor/github.com/docker/docker/client/secret_create.go
+++ b/vendor/github.com/docker/docker/client/secret_create.go
@@ -4,22 +4,21 @@ import (
"context"
"encoding/json"
- "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
)
// SecretCreate creates a new secret.
-func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) {
+func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (swarm.SecretCreateResponse, error) {
if err := cli.NewVersionError(ctx, "1.25", "secret create"); err != nil {
- return types.SecretCreateResponse{}, err
+ return swarm.SecretCreateResponse{}, err
}
resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil)
defer ensureReaderClosed(resp)
if err != nil {
- return types.SecretCreateResponse{}, err
+ return swarm.SecretCreateResponse{}, err
}
- var response types.SecretCreateResponse
+ var response swarm.SecretCreateResponse
err = json.NewDecoder(resp.Body).Decode(&response)
return response, err
}
diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go
index e3b7dbdb9e6..b158d99ed9a 100644
--- a/vendor/github.com/docker/docker/client/secret_list.go
+++ b/vendor/github.com/docker/docker/client/secret_list.go
@@ -5,13 +5,12 @@ import (
"encoding/json"
"net/url"
- "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
)
// SecretList returns the list of secrets.
-func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) {
+func (cli *Client) SecretList(ctx context.Context, options swarm.SecretListOptions) ([]swarm.Secret, error) {
if err := cli.NewVersionError(ctx, "1.25", "secret list"); err != nil {
return nil, err
}
diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go
index 54c03b13894..6b9932ae2e1 100644
--- a/vendor/github.com/docker/docker/client/service_create.go
+++ b/vendor/github.com/docker/docker/client/service_create.go
@@ -8,7 +8,6 @@ import (
"strings"
"github.com/distribution/reference"
- "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/versions"
@@ -17,7 +16,7 @@ import (
)
// ServiceCreate creates a new service.
-func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) {
+func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options swarm.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) {
var response swarm.ServiceCreateResponse
// Make sure we negotiated (if the client is configured to do so),
diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go
index 77b4402d37a..892e9004fe1 100644
--- a/vendor/github.com/docker/docker/client/service_inspect.go
+++ b/vendor/github.com/docker/docker/client/service_inspect.go
@@ -8,12 +8,11 @@ import (
"io"
"net/url"
- "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
)
// ServiceInspectWithRaw returns the service information and the raw data.
-func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) {
+func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts swarm.ServiceInspectOptions) (swarm.Service, []byte, error) {
serviceID, err := trimID("service", serviceID)
if err != nil {
return swarm.Service{}, nil, err
diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go
index f589a8423ad..019873bb6df 100644
--- a/vendor/github.com/docker/docker/client/service_list.go
+++ b/vendor/github.com/docker/docker/client/service_list.go
@@ -5,13 +5,12 @@ import (
"encoding/json"
"net/url"
- "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
)
// ServiceList returns the list of services.
-func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) {
+func (cli *Client) ServiceList(ctx context.Context, options swarm.ServiceListOptions) ([]swarm.Service, error) {
query := url.Values{}
if options.Filters.Len() > 0 {
diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go
index ecb98f46840..e0c1a26648b 100644
--- a/vendor/github.com/docker/docker/client/service_update.go
+++ b/vendor/github.com/docker/docker/client/service_update.go
@@ -6,7 +6,6 @@ import (
"net/http"
"net/url"
- "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/versions"
@@ -15,7 +14,7 @@ import (
// ServiceUpdate updates a Service. The version number is required to avoid conflicting writes.
// It should be the value as set *before* the update. You can find this value in the Meta field
// of swarm.Service, which can be found using ServiceInspectWithRaw.
-func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) {
+func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options swarm.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) {
serviceID, err := trimID("service", serviceID)
if err != nil {
return swarm.ServiceUpdateResponse{}, err
diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go
index 271fc08c95f..6e30daf6159 100644
--- a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go
+++ b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go
@@ -4,18 +4,18 @@ import (
"context"
"encoding/json"
- "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/swarm"
)
// SwarmGetUnlockKey retrieves the swarm's unlock key.
-func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) {
+func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (swarm.UnlockKeyResponse, error) {
resp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil)
defer ensureReaderClosed(resp)
if err != nil {
- return types.SwarmUnlockKeyResponse{}, err
+ return swarm.UnlockKeyResponse{}, err
}
- var response types.SwarmUnlockKeyResponse
+ var response swarm.UnlockKeyResponse
err = json.NewDecoder(resp.Body).Decode(&response)
return response, err
}
diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go
index aba7f61e655..5d540c38327 100644
--- a/vendor/github.com/docker/docker/client/task_list.go
+++ b/vendor/github.com/docker/docker/client/task_list.go
@@ -5,13 +5,12 @@ import (
"encoding/json"
"net/url"
- "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
)
// TaskList returns the list of tasks.
-func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) {
+func (cli *Client) TaskList(ctx context.Context, options swarm.TaskListOptions) ([]swarm.Task, error) {
query := url.Values{}
if options.Filters.Len() > 0 {
diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go
index 925d4d8d385..27f2b9884b5 100644
--- a/vendor/github.com/docker/docker/client/utils.go
+++ b/vendor/github.com/docker/docker/client/utils.go
@@ -6,8 +6,8 @@ import (
"net/url"
"strings"
+ cerrdefs "github.com/containerd/errdefs"
"github.com/docker/docker/api/types/filters"
- "github.com/docker/docker/errdefs"
"github.com/docker/docker/internal/lazyregexp"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
@@ -90,7 +90,7 @@ func encodePlatforms(platform ...ocispec.Platform) ([]string, error) {
func encodePlatform(platform *ocispec.Platform) (string, error) {
p, err := json.Marshal(platform)
if err != nil {
- return "", errdefs.InvalidParameter(fmt.Errorf("invalid platform: %v", err))
+ return "", fmt.Errorf("%w: invalid platform: %v", cerrdefs.ErrInvalidArgument, err)
}
return string(p), nil
}
diff --git a/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/docker/docker/errdefs/defs.go
deleted file mode 100644
index a5523c3e95f..00000000000
--- a/vendor/github.com/docker/docker/errdefs/defs.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package errdefs
-
-// ErrNotFound signals that the requested object doesn't exist
-type ErrNotFound interface {
- NotFound()
-}
-
-// ErrInvalidParameter signals that the user input is invalid
-type ErrInvalidParameter interface {
- InvalidParameter()
-}
-
-// ErrConflict signals that some internal state conflicts with the requested action and can't be performed.
-// A change in state should be able to clear this error.
-type ErrConflict interface {
- Conflict()
-}
-
-// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action
-type ErrUnauthorized interface {
- Unauthorized()
-}
-
-// ErrUnavailable signals that the requested action/subsystem is not available.
-type ErrUnavailable interface {
- Unavailable()
-}
-
-// ErrForbidden signals that the requested action cannot be performed under any circumstances.
-// When a ErrForbidden is returned, the caller should never retry the action.
-type ErrForbidden interface {
- Forbidden()
-}
-
-// ErrSystem signals that some internal error occurred.
-// An example of this would be a failed mount request.
-type ErrSystem interface {
- System()
-}
-
-// ErrNotModified signals that an action can't be performed because it's already in the desired state
-type ErrNotModified interface {
- NotModified()
-}
-
-// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured.
-type ErrNotImplemented interface {
- NotImplemented()
-}
-
-// ErrUnknown signals that the kind of error that occurred is not known.
-type ErrUnknown interface {
- Unknown()
-}
-
-// ErrCancelled signals that the action was cancelled.
-type ErrCancelled interface {
- Cancelled()
-}
-
-// ErrDeadline signals that the deadline was reached before the action completed.
-type ErrDeadline interface {
- DeadlineExceeded()
-}
-
-// ErrDataLoss indicates that data was lost or there is data corruption.
-type ErrDataLoss interface {
- DataLoss()
-}
diff --git a/vendor/github.com/docker/docker/errdefs/doc.go b/vendor/github.com/docker/docker/errdefs/doc.go
deleted file mode 100644
index c211f174fc1..00000000000
--- a/vendor/github.com/docker/docker/errdefs/doc.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors.
-// Errors that cross the package boundary should implement one (and only one) of these interfaces.
-//
-// Packages should not reference these interfaces directly, only implement them.
-// To check if a particular error implements one of these interfaces, there are helper
-// functions provided (e.g. `Is`) which can be used rather than asserting the interfaces directly.
-// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`).
-package errdefs // import "github.com/docker/docker/errdefs"
diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go
deleted file mode 100644
index ab76e627369..00000000000
--- a/vendor/github.com/docker/docker/errdefs/helpers.go
+++ /dev/null
@@ -1,305 +0,0 @@
-package errdefs
-
-import "context"
-
-type errNotFound struct{ error }
-
-func (errNotFound) NotFound() {}
-
-func (e errNotFound) Cause() error {
- return e.error
-}
-
-func (e errNotFound) Unwrap() error {
- return e.error
-}
-
-// NotFound creates an [ErrNotFound] error from the given error.
-// It returns the error as-is if it is either nil (no error) or already implements
-// [ErrNotFound],
-func NotFound(err error) error {
- if err == nil || IsNotFound(err) {
- return err
- }
- return errNotFound{err}
-}
-
-type errInvalidParameter struct{ error }
-
-func (errInvalidParameter) InvalidParameter() {}
-
-func (e errInvalidParameter) Cause() error {
- return e.error
-}
-
-func (e errInvalidParameter) Unwrap() error {
- return e.error
-}
-
-// InvalidParameter creates an [ErrInvalidParameter] error from the given error.
-// It returns the error as-is if it is either nil (no error) or already implements
-// [ErrInvalidParameter],
-func InvalidParameter(err error) error {
- if err == nil || IsInvalidParameter(err) {
- return err
- }
- return errInvalidParameter{err}
-}
-
-type errConflict struct{ error }
-
-func (errConflict) Conflict() {}
-
-func (e errConflict) Cause() error {
- return e.error
-}
-
-func (e errConflict) Unwrap() error {
- return e.error
-}
-
-// Conflict creates an [ErrConflict] error from the given error.
-// It returns the error as-is if it is either nil (no error) or already implements
-// [ErrConflict],
-func Conflict(err error) error {
- if err == nil || IsConflict(err) {
- return err
- }
- return errConflict{err}
-}
-
-type errUnauthorized struct{ error }
-
-func (errUnauthorized) Unauthorized() {}
-
-func (e errUnauthorized) Cause() error {
- return e.error
-}
-
-func (e errUnauthorized) Unwrap() error {
- return e.error
-}
-
-// Unauthorized creates an [ErrUnauthorized] error from the given error.
-// It returns the error as-is if it is either nil (no error) or already implements
-// [ErrUnauthorized],
-func Unauthorized(err error) error {
- if err == nil || IsUnauthorized(err) {
- return err
- }
- return errUnauthorized{err}
-}
-
-type errUnavailable struct{ error }
-
-func (errUnavailable) Unavailable() {}
-
-func (e errUnavailable) Cause() error {
- return e.error
-}
-
-func (e errUnavailable) Unwrap() error {
- return e.error
-}
-
-// Unavailable creates an [ErrUnavailable] error from the given error.
-// It returns the error as-is if it is either nil (no error) or already implements
-// [ErrUnavailable],
-func Unavailable(err error) error {
- if err == nil || IsUnavailable(err) {
- return err
- }
- return errUnavailable{err}
-}
-
-type errForbidden struct{ error }
-
-func (errForbidden) Forbidden() {}
-
-func (e errForbidden) Cause() error {
- return e.error
-}
-
-func (e errForbidden) Unwrap() error {
- return e.error
-}
-
-// Forbidden creates an [ErrForbidden] error from the given error.
-// It returns the error as-is if it is either nil (no error) or already implements
-// [ErrForbidden],
-func Forbidden(err error) error {
- if err == nil || IsForbidden(err) {
- return err
- }
- return errForbidden{err}
-}
-
-type errSystem struct{ error }
-
-func (errSystem) System() {}
-
-func (e errSystem) Cause() error {
- return e.error
-}
-
-func (e errSystem) Unwrap() error {
- return e.error
-}
-
-// System creates an [ErrSystem] error from the given error.
-// It returns the error as-is if it is either nil (no error) or already implements
-// [ErrSystem],
-func System(err error) error {
- if err == nil || IsSystem(err) {
- return err
- }
- return errSystem{err}
-}
-
-type errNotModified struct{ error }
-
-func (errNotModified) NotModified() {}
-
-func (e errNotModified) Cause() error {
- return e.error
-}
-
-func (e errNotModified) Unwrap() error {
- return e.error
-}
-
-// NotModified creates an [ErrNotModified] error from the given error.
-// It returns the error as-is if it is either nil (no error) or already implements
-// [NotModified],
-func NotModified(err error) error {
- if err == nil || IsNotModified(err) {
- return err
- }
- return errNotModified{err}
-}
-
-type errNotImplemented struct{ error }
-
-func (errNotImplemented) NotImplemented() {}
-
-func (e errNotImplemented) Cause() error {
- return e.error
-}
-
-func (e errNotImplemented) Unwrap() error {
- return e.error
-}
-
-// NotImplemented creates an [ErrNotImplemented] error from the given error.
-// It returns the error as-is if it is either nil (no error) or already implements
-// [ErrNotImplemented],
-func NotImplemented(err error) error {
- if err == nil || IsNotImplemented(err) {
- return err
- }
- return errNotImplemented{err}
-}
-
-type errUnknown struct{ error }
-
-func (errUnknown) Unknown() {}
-
-func (e errUnknown) Cause() error {
- return e.error
-}
-
-func (e errUnknown) Unwrap() error {
- return e.error
-}
-
-// Unknown creates an [ErrUnknown] error from the given error.
-// It returns the error as-is if it is either nil (no error) or already implements
-// [ErrUnknown],
-func Unknown(err error) error {
- if err == nil || IsUnknown(err) {
- return err
- }
- return errUnknown{err}
-}
-
-type errCancelled struct{ error }
-
-func (errCancelled) Cancelled() {}
-
-func (e errCancelled) Cause() error {
- return e.error
-}
-
-func (e errCancelled) Unwrap() error {
- return e.error
-}
-
-// Cancelled creates an [ErrCancelled] error from the given error.
-// It returns the error as-is if it is either nil (no error) or already implements
-// [ErrCancelled],
-func Cancelled(err error) error {
- if err == nil || IsCancelled(err) {
- return err
- }
- return errCancelled{err}
-}
-
-type errDeadline struct{ error }
-
-func (errDeadline) DeadlineExceeded() {}
-
-func (e errDeadline) Cause() error {
- return e.error
-}
-
-func (e errDeadline) Unwrap() error {
- return e.error
-}
-
-// Deadline creates an [ErrDeadline] error from the given error.
-// It returns the error as-is if it is either nil (no error) or already implements
-// [ErrDeadline],
-func Deadline(err error) error {
- if err == nil || IsDeadline(err) {
- return err
- }
- return errDeadline{err}
-}
-
-type errDataLoss struct{ error }
-
-func (errDataLoss) DataLoss() {}
-
-func (e errDataLoss) Cause() error {
- return e.error
-}
-
-func (e errDataLoss) Unwrap() error {
- return e.error
-}
-
-// DataLoss creates an [ErrDataLoss] error from the given error.
-// It returns the error as-is if it is either nil (no error) or already implements
-// [ErrDataLoss],
-func DataLoss(err error) error {
- if err == nil || IsDataLoss(err) {
- return err
- }
- return errDataLoss{err}
-}
-
-// FromContext returns the error class from the passed in context
-func FromContext(ctx context.Context) error {
- e := ctx.Err()
- if e == nil {
- return nil
- }
-
- if e == context.Canceled {
- return Cancelled(e)
- }
- if e == context.DeadlineExceeded {
- return Deadline(e)
- }
- return Unknown(e)
-}
diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go
deleted file mode 100644
index 0a8fadd48f1..00000000000
--- a/vendor/github.com/docker/docker/errdefs/http_helpers.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package errdefs
-
-import (
- "net/http"
-)
-
-// FromStatusCode creates an errdef error, based on the provided HTTP status-code
-func FromStatusCode(err error, statusCode int) error {
- if err == nil {
- return nil
- }
- switch statusCode {
- case http.StatusNotFound:
- return NotFound(err)
- case http.StatusBadRequest:
- return InvalidParameter(err)
- case http.StatusConflict:
- return Conflict(err)
- case http.StatusUnauthorized:
- return Unauthorized(err)
- case http.StatusServiceUnavailable:
- return Unavailable(err)
- case http.StatusForbidden:
- return Forbidden(err)
- case http.StatusNotModified:
- return NotModified(err)
- case http.StatusNotImplemented:
- return NotImplemented(err)
- case http.StatusInternalServerError:
- if IsCancelled(err) || IsSystem(err) || IsUnknown(err) || IsDataLoss(err) || IsDeadline(err) {
- return err
- }
- return System(err)
- default:
- switch {
- case statusCode >= 200 && statusCode < 400:
- // it's a client error
- return err
- case statusCode >= 400 && statusCode < 500:
- return InvalidParameter(err)
- case statusCode >= 500 && statusCode < 600:
- return System(err)
- default:
- return Unknown(err)
- }
- }
-}
diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go
deleted file mode 100644
index 30ea7e6fec2..00000000000
--- a/vendor/github.com/docker/docker/errdefs/is.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package errdefs
-
-import (
- "context"
- "errors"
-)
-
-type causer interface {
- Cause() error
-}
-
-type wrapErr interface {
- Unwrap() error
-}
-
-func getImplementer(err error) error {
- switch e := err.(type) {
- case
- ErrNotFound,
- ErrInvalidParameter,
- ErrConflict,
- ErrUnauthorized,
- ErrUnavailable,
- ErrForbidden,
- ErrSystem,
- ErrNotModified,
- ErrNotImplemented,
- ErrCancelled,
- ErrDeadline,
- ErrDataLoss,
- ErrUnknown:
- return err
- case causer:
- return getImplementer(e.Cause())
- case wrapErr:
- return getImplementer(e.Unwrap())
- default:
- return err
- }
-}
-
-// IsNotFound returns if the passed in error is an [ErrNotFound],
-func IsNotFound(err error) bool {
- _, ok := getImplementer(err).(ErrNotFound)
- return ok
-}
-
-// IsInvalidParameter returns if the passed in error is an [ErrInvalidParameter].
-func IsInvalidParameter(err error) bool {
- _, ok := getImplementer(err).(ErrInvalidParameter)
- return ok
-}
-
-// IsConflict returns if the passed in error is an [ErrConflict].
-func IsConflict(err error) bool {
- _, ok := getImplementer(err).(ErrConflict)
- return ok
-}
-
-// IsUnauthorized returns if the passed in error is an [ErrUnauthorized].
-func IsUnauthorized(err error) bool {
- _, ok := getImplementer(err).(ErrUnauthorized)
- return ok
-}
-
-// IsUnavailable returns if the passed in error is an [ErrUnavailable].
-func IsUnavailable(err error) bool {
- _, ok := getImplementer(err).(ErrUnavailable)
- return ok
-}
-
-// IsForbidden returns if the passed in error is an [ErrForbidden].
-func IsForbidden(err error) bool {
- _, ok := getImplementer(err).(ErrForbidden)
- return ok
-}
-
-// IsSystem returns if the passed in error is an [ErrSystem].
-func IsSystem(err error) bool {
- _, ok := getImplementer(err).(ErrSystem)
- return ok
-}
-
-// IsNotModified returns if the passed in error is an [ErrNotModified].
-func IsNotModified(err error) bool {
- _, ok := getImplementer(err).(ErrNotModified)
- return ok
-}
-
-// IsNotImplemented returns if the passed in error is an [ErrNotImplemented].
-func IsNotImplemented(err error) bool {
- _, ok := getImplementer(err).(ErrNotImplemented)
- return ok
-}
-
-// IsUnknown returns if the passed in error is an [ErrUnknown].
-func IsUnknown(err error) bool {
- _, ok := getImplementer(err).(ErrUnknown)
- return ok
-}
-
-// IsCancelled returns if the passed in error is an [ErrCancelled].
-func IsCancelled(err error) bool {
- _, ok := getImplementer(err).(ErrCancelled)
- return ok
-}
-
-// IsDeadline returns if the passed in error is an [ErrDeadline].
-func IsDeadline(err error) bool {
- _, ok := getImplementer(err).(ErrDeadline)
- return ok
-}
-
-// IsDataLoss returns if the passed in error is an [ErrDataLoss].
-func IsDataLoss(err error) bool {
- _, ok := getImplementer(err).(ErrDataLoss)
- return ok
-}
-
-// IsContext returns if the passed in error is due to context cancellation or deadline exceeded.
-func IsContext(err error) bool {
- return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded)
-}
diff --git a/vendor/github.com/docker/docker/internal/multierror/multierror.go b/vendor/github.com/docker/docker/internal/multierror/multierror.go
index cf4d6a59574..e899f4de85c 100644
--- a/vendor/github.com/docker/docker/internal/multierror/multierror.go
+++ b/vendor/github.com/docker/docker/internal/multierror/multierror.go
@@ -36,7 +36,7 @@ func (e *joinError) Error() string {
}
stringErrs := make([]string, 0, len(e.errs))
for _, subErr := range e.errs {
- stringErrs = append(stringErrs, strings.Replace(subErr.Error(), "\n", "\n\t", -1))
+ stringErrs = append(stringErrs, strings.ReplaceAll(subErr.Error(), "\n", "\n\t"))
}
return "* " + strings.Join(stringErrs, "\n* ")
}
diff --git a/vendor/github.com/docker/docker/pkg/progress/progressreader.go b/vendor/github.com/docker/docker/pkg/progress/progressreader.go
index 07450a2d708..1438814d63b 100644
--- a/vendor/github.com/docker/docker/pkg/progress/progressreader.go
+++ b/vendor/github.com/docker/docker/pkg/progress/progressreader.go
@@ -31,7 +31,7 @@ func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action stri
}
}
-func (p *Reader) Read(buf []byte) (n int, err error) {
+func (p *Reader) Read(buf []byte) (int, error) {
read, err := p.in.Read(buf)
p.current += int64(read)
updateEvery := int64(1024 * 512) // 512kB
diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go
index 854e4c37181..d4376138a23 100644
--- a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go
+++ b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go
@@ -91,12 +91,12 @@ func NewStdWriter(w io.Writer, t StdType) io.Writer {
// In other words: if `err` is non nil, it indicates a real underlying error.
//
// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
-func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
+func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, _ error) {
var (
buf = make([]byte, startingBufLen)
bufLen = len(buf)
nr, nw int
- er, ew error
+ err error
out io.Writer
frameSize int
)
@@ -105,16 +105,16 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error)
// Make sure we have at least a full header
for nr < stdWriterPrefixLen {
var nr2 int
- nr2, er = src.Read(buf[nr:])
+ nr2, err = src.Read(buf[nr:])
nr += nr2
- if er == io.EOF {
+ if err == io.EOF {
if nr < stdWriterPrefixLen {
return written, nil
}
break
}
- if er != nil {
- return 0, er
+ if err != nil {
+ return 0, err
}
}
@@ -151,16 +151,16 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error)
// While the amount of bytes read is less than the size of the frame + header, we keep reading
for nr < frameSize+stdWriterPrefixLen {
var nr2 int
- nr2, er = src.Read(buf[nr:])
+ nr2, err = src.Read(buf[nr:])
nr += nr2
- if er == io.EOF {
+ if err == io.EOF {
if nr < frameSize+stdWriterPrefixLen {
return written, nil
}
break
}
- if er != nil {
- return 0, er
+ if err != nil {
+ return 0, err
}
}
@@ -171,9 +171,9 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error)
}
// Write the retrieved frame (without header)
- nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen])
- if ew != nil {
- return 0, ew
+ nw, err = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen])
+ if err != nil {
+ return 0, err
}
// If the frame has not been fully written: error
diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml
index 0cffafa7bf9..0ed62c1a180 100644
--- a/vendor/github.com/go-logr/logr/.golangci.yaml
+++ b/vendor/github.com/go-logr/logr/.golangci.yaml
@@ -1,26 +1,28 @@
+version: "2"
+
run:
timeout: 1m
tests: true
linters:
- disable-all: true
- enable:
+ default: none
+ enable: # please keep this alphabetized
+ - asasalint
- asciicheck
+ - copyloopvar
+ - dupl
- errcheck
- forcetypeassert
+ - goconst
- gocritic
- - gofmt
- - goimports
- - gosimple
- govet
- ineffassign
- misspell
+ - musttag
- revive
- staticcheck
- - typecheck
- unused
issues:
- exclude-use-default: false
max-issues-per-linter: 0
max-same-issues: 10
diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go
index 30568e768dc..b22c57d7137 100644
--- a/vendor/github.com/go-logr/logr/funcr/funcr.go
+++ b/vendor/github.com/go-logr/logr/funcr/funcr.go
@@ -77,7 +77,7 @@ func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
write: fn,
}
// For skipping fnlogger.Info and fnlogger.Error.
- l.Formatter.AddCallDepth(1)
+ l.AddCallDepth(1) // via Formatter
return l
}
@@ -164,17 +164,17 @@ type fnlogger struct {
}
func (l fnlogger) WithName(name string) logr.LogSink {
- l.Formatter.AddName(name)
+ l.AddName(name) // via Formatter
return &l
}
func (l fnlogger) WithValues(kvList ...any) logr.LogSink {
- l.Formatter.AddValues(kvList)
+ l.AddValues(kvList) // via Formatter
return &l
}
func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
- l.Formatter.AddCallDepth(depth)
+ l.AddCallDepth(depth) // via Formatter
return &l
}
diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml
index 22f8d21cca1..500630621fb 100644
--- a/vendor/github.com/go-openapi/strfmt/.golangci.yml
+++ b/vendor/github.com/go-openapi/strfmt/.golangci.yml
@@ -1,61 +1,62 @@
-linters-settings:
- govet:
- check-shadowing: true
- golint:
- min-confidence: 0
- gocyclo:
- min-complexity: 45
- maligned:
- suggest-new: true
- dupl:
- threshold: 200
- goconst:
- min-len: 2
- min-occurrences: 3
-
+version: "2"
linters:
- enable-all: true
+ default: all
disable:
- - maligned
- - unparam
- - lll
- - gochecknoinits
- - gochecknoglobals
+ - cyclop
+ - depguard
+ - errchkjson
+ - errorlint
+ - exhaustruct
+ - forcetypeassert
- funlen
- - godox
+ - gochecknoglobals
+ - gochecknoinits
- gocognit
- - whitespace
- - wsl
- - wrapcheck
- - testpackage
- - nlreturn
- - gomnd
- - exhaustivestruct
- - goerr113
- - errorlint
- - nestif
- godot
- - gofumpt
+ - godox
+ - gosmopolitan
+ - inamedparam
+ - ireturn
+ - lll
+ - musttag
+ - nestif
+ - nlreturn
+ - nonamedreturns
- paralleltest
- - tparallel
+ - testpackage
- thelper
- - ifshort
- - exhaustruct
+ - tparallel
+ - unparam
- varnamelen
- - gci
- - depguard
- - errchkjson
- - inamedparam
- - nonamedreturns
- - musttag
- - ireturn
- - forcetypeassert
- - cyclop
- # deprecated linters
- - deadcode
- - interfacer
- - scopelint
- - varcheck
- - structcheck
- - golint
- - nosnakecase
+ - whitespace
+ - wrapcheck
+ - wsl
+ settings:
+ dupl:
+ threshold: 200
+ goconst:
+ min-len: 2
+ min-occurrences: 3
+ gocyclo:
+ min-complexity: 45
+ exclusions:
+ generated: lax
+ presets:
+ - comments
+ - common-false-positives
+ - legacy
+ - std-error-handling
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+formatters:
+ enable:
+ - gofmt
+ - goimports
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go
index cfa9a526feb..685eaf63cbe 100644
--- a/vendor/github.com/go-openapi/strfmt/bson.go
+++ b/vendor/github.com/go-openapi/strfmt/bson.go
@@ -83,7 +83,7 @@ func (id *ObjectId) Scan(raw interface{}) error {
case string:
data = []byte(v)
default:
- return fmt.Errorf("cannot sql.Scan() strfmt.URI from: %#v", v)
+ return fmt.Errorf("cannot sql.Scan() strfmt.URI from: %#v: %w", v, ErrFormat)
}
return id.UnmarshalText(data)
diff --git a/vendor/github.com/go-openapi/strfmt/date.go b/vendor/github.com/go-openapi/strfmt/date.go
index 3c93381c7cc..a8f52ff354d 100644
--- a/vendor/github.com/go-openapi/strfmt/date.go
+++ b/vendor/github.com/go-openapi/strfmt/date.go
@@ -17,7 +17,6 @@ package strfmt
import (
"database/sql/driver"
"encoding/json"
- "errors"
"fmt"
"time"
@@ -84,7 +83,7 @@ func (d *Date) Scan(raw interface{}) error {
*d = Date{}
return nil
default:
- return fmt.Errorf("cannot sql.Scan() strfmt.Date from: %#v", v)
+ return fmt.Errorf("cannot sql.Scan() strfmt.Date from: %#v: %w", v, ErrFormat)
}
}
@@ -134,7 +133,7 @@ func (d *Date) UnmarshalBSON(data []byte) error {
return nil
}
- return errors.New("couldn't unmarshal bson bytes value as Date")
+ return fmt.Errorf("couldn't unmarshal bson bytes value as Date: %w", ErrFormat)
}
// DeepCopyInto copies the receiver and writes its value into out.
diff --git a/vendor/github.com/go-openapi/strfmt/default.go b/vendor/github.com/go-openapi/strfmt/default.go
index 2813714060e..0c9514dbd54 100644
--- a/vendor/github.com/go-openapi/strfmt/default.go
+++ b/vendor/github.com/go-openapi/strfmt/default.go
@@ -18,46 +18,23 @@ import (
"database/sql/driver"
"encoding/base64"
"encoding/json"
- "errors"
"fmt"
"net/mail"
- "regexp"
+ "net/netip"
+ "strconv"
"strings"
"github.com/asaskevich/govalidator"
"github.com/google/uuid"
"go.mongodb.org/mongo-driver/bson"
+ "golang.org/x/net/idna"
)
const (
- // HostnamePattern http://json-schema.org/latest/json-schema-validation.html#anchor114
- // A string instance is valid against this attribute if it is a valid
- // representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034].
- // http://tools.ietf.org/html/rfc1034#section-3.5
- // ::= any one of the ten digits 0 through 9
- // var digit = /[0-9]/;
- // ::= any one of the 52 alphabetic characters A through Z in upper case and a through z in lower case
- // var letter = /[a-zA-Z]/;
- // ::= |
- // var letDig = /[0-9a-zA-Z]/;
- // ::= | "-"
- // var letDigHyp = /[-0-9a-zA-Z]/;
- // ::= |
- // var ldhStr = /[-0-9a-zA-Z]+/;
- //